Contains the code to balance classes and create the training/val/test sets
# For Python 3.7
#!pip install keras tensorflow matplotlib numpy pandas imblearn split-folders scikit-learn tensorflow-addons scikeras[tensorflow]
# For Python 3.10
#!pip install "keras==2.10.0" "tensorflow<2.11" matplotlib numpy pandas imblearn split-folders scikit-learn "tensorflow-addons<2.11"
#!pip install --no-deps keras-tuner
# Set this variable to the root path of where the files are located
%env DATA_LOCATION=P:\CODE\ITESM\tesis-dataset-downloader\solanum_output
%env CLEAN_DATA_FOLDER=z_clean_resized
%env CLEAN_SPLIT_DATA_FOLDER=z_clean_resized_split
# Allow an async memory allocation to avoid OOM errors when using multiple models
#%env TF_GPU_ALLOCATOR=cuda_malloc_async
%env TF_FORCE_GPU_ALLOW_GROWTH=true
!echo %DATA_LOCATION%
#!dir %DATA_LOCATION%
# Constants
import os
RANDOM_SEED = 1988  # fixed seed so every sampling/splitting step is reproducible
DATA_ROOT_LOCATION = os.environ["DATA_LOCATION"]  # dataset root folder (set via %env above)
CLEAN_DATA_FOLDER = os.environ["CLEAN_DATA_FOLDER"]  # folder with the de-duplicated, resized images
CLEAN_SPLIT_DATA_FOLDER = os.environ["CLEAN_SPLIT_DATA_FOLDER"]  # folder for the train/val/test split
env: DATA_LOCATION=P:\CODE\ITESM\tesis-dataset-downloader\solanum_output env: CLEAN_DATA_FOLDER=z_clean_resized env: CLEAN_SPLIT_DATA_FOLDER=z_clean_resized_split env: TF_FORCE_GPU_ALLOW_GROWTH=true P:\CODE\ITESM\tesis-dataset-downloader\solanum_output
%matplotlib inline
import os
import pickle
import random
import time
from datetime import datetime

import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import numpy as np
from numpy import array
import pandas as pd
from PIL import Image
import splitfolders
import tensorflow as tf
from tensorflow.keras.utils import img_to_array
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow_addons as tfa
from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import RandomOverSampler
from sklearn.metrics import classification_report, confusion_matrix, ConfusionMatrixDisplay
from sklearn.model_selection import KFold, ShuffleSplit
# Enable 3rd Party Jupyter Widgets in Google Collab
#from google.colab import output
#output.enable_custom_widget_manager()
def get_training_device_name():
    """Return the TF device string to train on, preferring a GPU.

    Falls back to '/cpu:0' (with a warning) when no GPU is visible.
    """
    device = tf.test.gpu_device_name()
    if "GPU" in device:
        print('Found GPU at: {}'.format(device))
    else:
        print("No GPU was found!, training will be done in the CPU which will be slower")
        device = '/cpu:0'
    return device
TRAINING_DEVICE_NAME = get_training_device_name()
# Seed both Python's RNG and the TF/Keras RNGs so runs are reproducible
random.seed(RANDOM_SEED)
tf.keras.utils.set_random_seed(
    RANDOM_SEED
)
Found GPU at: /device:GPU:0
def print_marquee(msg: str):
    """
    Prints a centered message with a marquee of *
    """
    border = "*" * (len(msg) + 4)
    print("\n")
    print(border)
    print(f"* {msg} *")
    print(border)
def count_rows_by_column(df, column):
    """Return the row count per distinct value of *column*, largest count first."""
    counts = df.groupby([column]).size()
    counts = counts.reset_index(name='count')
    return counts.sort_values("count", ascending=False)
def model_2_pkl(model, filename: str):
    """Exports a model to PKL format.

    :param model: any picklable object (e.g. a fitted model)
    :param filename: destination path for the pickle file
    """
    # BUG FIX: the original left the file handle open (open() without close);
    # a context manager closes it even if pickling raises.
    with open(filename, 'wb') as f:
        pickle.dump(model, f)
def pkl_2_model(filename: str):
    """Loads a model from a PKL file.

    :param filename: path to the pickle file
    :returns: the unpickled object

    NOTE: pickle.load can execute arbitrary code; only load files produced
    by this project.
    """
    # BUG FIX: the original never closed the file handle; use a context manager.
    with open(filename, 'rb') as f:
        return pickle.load(f)
def get_class_names(generator) -> list:
    """Return the generator's class names ordered by their numeric class index."""
    index_by_name = generator.class_indices
    ordered_pairs = sorted(index_by_name.items(), key=lambda pair: pair[1])
    return [name for name, _ in ordered_pairs]
def generate_id() -> str:
    """Generate an ID shaped YYYYMMDDhhmmss, used to name models and files."""
    return f"{datetime.today():%Y%m%d%H%M%S}"
def show_images(df: pd.DataFrame,
                labels_col_name="section",
                rows=2,
                cols=2,
                samples_per_label=1,
                figsize=(15,15)):
    """Show a grid of sample images for every distinct label in *df*.

    :param df: DataFrame with a "full_path" column pointing to image files
    :param labels_col_name: column holding the class label to group by
    :param rows: grid rows per label figure
    :param cols: grid columns per label figure
    :param samples_per_label: how many images to sample for each label
    :param figsize: matplotlib figure size per label
    """
    sections = np.sort(df[labels_col_name].unique())
    for section in sections:
        # One figure per label, titled with the label name
        parent_fig = plt.figure(section, figsize=figsize)
        plt.title(section)
        plt.axis("off")
        # Deterministic sample thanks to the fixed RANDOM_SEED
        samples = df[df[labels_col_name]==section].sample(samples_per_label,
                                                          random_state=RANDOM_SEED)
        samples = [Image.open(path) for path in samples["full_path"]]
        grid = ImageGrid(parent_fig,
                         rect=111, # This will set the axes position as (left, bottom, width, height) tuple or as a three-digit subplot position code, it's 111 in our case
                         nrows_ncols=(rows, cols), # creates rows,cols grid of axes
                         axes_pad=0.1, # Padding between pictures
                         )
        # NOTE: if samples_per_label < rows*cols, the extra axes stay empty
        for ax, im in zip(grid, samples):
            ax.imshow(im)
            ax.axis("off")
        plt.show()
# Load the cleaned image index: one row per image with its section, species,
# file size, image type, source and full path
images_df = pd.read_csv(os.path.join(DATA_ROOT_LOCATION, CLEAN_DATA_FOLDER, "images_dedup_512x512_100picspersection.csv"))
# Update all paths to conform to the local structure (Only if running in a UN*X environment)
# 'P:/CODE/ITESM/tesis-dataset-downloader/solanum_output/z_clean_resized/acanthophora/acanthophora_acerifolium_1928496814_gbif_2700.jpg'
# path_to_replace = "P:/CODE/ITESM/tesis-dataset-downloader/solanum_output/z_clean_resized"
# images_df["full_path"] = images_df["full_path"].str.replace(path_to_replace, data_root_location)
display(images_df.describe(include="all"))
| section | species | filesize_mb | image_type | source | full_path | |
|---|---|---|---|---|---|---|
| count | 8515 | 8515 | 8515.000000 | 8515 | 8515 | 8515 |
| unique | 12 | 112 | NaN | 1 | 2 | 8515 |
| top | petota | lanceolatum | NaN | jpg | gbif | P:/CODE/ITESM/tesis-dataset-downloader/solanum... |
| freq | 1733 | 533 | NaN | 8515 | 6103 | 1 |
| mean | NaN | NaN | 0.029506 | NaN | NaN | NaN |
| std | NaN | NaN | 0.005773 | NaN | NaN | NaN |
| min | NaN | NaN | 0.012600 | NaN | NaN | NaN |
| 25% | NaN | NaN | 0.025500 | NaN | NaN | NaN |
| 50% | NaN | NaN | 0.029200 | NaN | NaN | NaN |
| 75% | NaN | NaN | 0.033200 | NaN | NaN | NaN |
| max | NaN | NaN | 0.062900 | NaN | NaN | NaN |
# Inspect the class distribution before any balancing is applied
print_marquee("Classes")
count_per_section = count_rows_by_column(images_df, "section")
display(count_per_section)
display(count_per_section.describe())
print_marquee("Info")
display(images_df.info())
*********** * Classes * ***********
| section | count | |
|---|---|---|
| 9 | petota | 1733 |
| 5 | holophylla | 1449 |
| 7 | melongena | 1442 |
| 11 | torva | 1332 |
| 2 | brevantherum | 811 |
| 10 | solanum | 577 |
| 3 | dulcamara | 389 |
| 4 | herposolanum | 224 |
| 8 | micracantha | 180 |
| 6 | lasiocarpa | 178 |
| 0 | acanthophora | 100 |
| 1 | anarrhichomenum | 100 |
| count | |
|---|---|
| count | 12.000000 |
| mean | 709.583333 |
| std | 617.223027 |
| min | 100.000000 |
| 25% | 179.500000 |
| 50% | 483.000000 |
| 75% | 1359.500000 |
| max | 1733.000000 |
******** * Info * ******** <class 'pandas.core.frame.DataFrame'> RangeIndex: 8515 entries, 0 to 8514 Data columns (total 6 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 section 8515 non-null object 1 species 8515 non-null object 2 filesize_mb 8515 non-null float64 3 image_type 8515 non-null object 4 source 8515 non-null object 5 full_path 8515 non-null object dtypes: float64(1), object(5) memory usage: 399.3+ KB
None
Given that we will oversample/undersample our dataset, we first need to set aside 10% of each class for testing so that we don't contaminate our results by having the model train on test samples; the remaining 90% is what will then be balanced.
# Iterate over each of the classes and sample out 10% (TEST_FRACTION) of each class
# put this 90% and 10% into new DataFrames
TEST_FRACTION = 0.1
def split_balanced_dataset(original_images_df, test_fraction=TEST_FRACTION, is_verbose=True):
    """Split the image index into train/test DataFrames, stratified per section.

    For every section, *test_fraction* of its rows is sampled without
    replacement into the test set and the remainder goes to training.
    The training set is shuffled before being returned.

    :param original_images_df: full image index with a "section" column
    :param test_fraction: fraction of each section held out for testing
    :param is_verbose: print per-section shapes while splitting
    :returns: (training_set, test_set) DataFrames
    """
    train_parts = []
    test_parts = []
    for section in original_images_df["section"].unique():
        section_rows = original_images_df[original_images_df["section"] == section]
        section_test = section_rows.sample(frac=test_fraction,
                                           random_state=RANDOM_SEED,
                                           replace=False)
        # Whatever was not sampled into the test set stays in training
        section_train = section_rows.drop(section_test.index)
        if is_verbose:
            print_marquee(section)
            print(f"Before sampling: {section_rows.shape}")
            print(f"Training set for section: {section_train.shape}")
            print(f"Test set for section: {section_test.shape}")
        train_parts.append(section_train)
        test_parts.append(section_test)
    training_set = pd.concat(train_parts)
    test_set = pd.concat(test_parts)
    # Shuffle the training set to avoid issues with CV later
    training_set = training_set.sample(frac=1,
                                       random_state=RANDOM_SEED).reset_index(drop=True)
    return training_set, test_set
training_set, test_set = split_balanced_dataset(images_df)
print(f">>>Train DF: {training_set.shape}\n Test DF: {test_set.shape}")
**************** * acanthophora * **************** Before sampling: (100, 6) Training set for section: (90, 6) Test set for section: (10, 6) ******************* * anarrhichomenum * ******************* Before sampling: (100, 6) Training set for section: (90, 6) Test set for section: (10, 6) **************** * brevantherum * **************** Before sampling: (811, 6) Training set for section: (730, 6) Test set for section: (81, 6) ************* * dulcamara * ************* Before sampling: (389, 6) Training set for section: (350, 6) Test set for section: (39, 6) **************** * herposolanum * **************** Before sampling: (224, 6) Training set for section: (202, 6) Test set for section: (22, 6) ************** * holophylla * ************** Before sampling: (1449, 6) Training set for section: (1304, 6) Test set for section: (145, 6) ************** * lasiocarpa * ************** Before sampling: (178, 6) Training set for section: (160, 6) Test set for section: (18, 6) ************* * melongena * ************* Before sampling: (1442, 6) Training set for section: (1298, 6) Test set for section: (144, 6) *************** * micracantha * *************** Before sampling: (180, 6) Training set for section: (162, 6) Test set for section: (18, 6) ********** * petota * ********** Before sampling: (1733, 6) Training set for section: (1560, 6) Test set for section: (173, 6) *********** * solanum * *********** Before sampling: (577, 6) Training set for section: (519, 6) Test set for section: (58, 6) ********* * torva * ********* Before sampling: (1332, 6) Training set for section: (1199, 6) Test set for section: (133, 6) >>>Train DF: (7664, 6) Test DF: (851, 6)
print_marquee("Training")
training_count_per_section = count_rows_by_column(training_set, "section")
display(training_count_per_section)
print_marquee("Info")
display(training_count_per_section.describe())
display(training_set.info())
print_marquee("Test")
test_count_per_section = count_rows_by_column(test_set, "section")
display(test_count_per_section)
print_marquee("Info")
display(test_set.info())
************ * Training * ************
| section | count | |
|---|---|---|
| 9 | petota | 1560 |
| 5 | holophylla | 1304 |
| 7 | melongena | 1298 |
| 11 | torva | 1199 |
| 2 | brevantherum | 730 |
| 10 | solanum | 519 |
| 3 | dulcamara | 350 |
| 4 | herposolanum | 202 |
| 8 | micracantha | 162 |
| 6 | lasiocarpa | 160 |
| 0 | acanthophora | 90 |
| 1 | anarrhichomenum | 90 |
******** * Info * ********
| count | |
|---|---|
| count | 12.000000 |
| mean | 638.666667 |
| std | 555.574123 |
| min | 90.000000 |
| 25% | 161.500000 |
| 50% | 434.500000 |
| 75% | 1223.750000 |
| max | 1560.000000 |
<class 'pandas.core.frame.DataFrame'> RangeIndex: 7664 entries, 0 to 7663 Data columns (total 6 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 section 7664 non-null object 1 species 7664 non-null object 2 filesize_mb 7664 non-null float64 3 image_type 7664 non-null object 4 source 7664 non-null object 5 full_path 7664 non-null object dtypes: float64(1), object(5) memory usage: 359.4+ KB
None
******** * Test * ********
| section | count | |
|---|---|---|
| 9 | petota | 173 |
| 5 | holophylla | 145 |
| 7 | melongena | 144 |
| 11 | torva | 133 |
| 2 | brevantherum | 81 |
| 10 | solanum | 58 |
| 3 | dulcamara | 39 |
| 4 | herposolanum | 22 |
| 6 | lasiocarpa | 18 |
| 8 | micracantha | 18 |
| 0 | acanthophora | 10 |
| 1 | anarrhichomenum | 10 |
******** * Info * ******** <class 'pandas.core.frame.DataFrame'> Int64Index: 851 entries, 77 to 8054 Data columns (total 6 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 section 851 non-null object 1 species 851 non-null object 2 filesize_mb 851 non-null float64 3 image_type 851 non-null object 4 source 851 non-null object 5 full_path 851 non-null object dtypes: float64(1), object(5) memory usage: 46.5+ KB
None
We can see that there are classes that are over-represented, so we need to do some undersampling and oversampling in order for the model to better learn and classify.
The average count for the samples in the training set is 638.67 while the median is 434.5 (listed as the 50% quantile), so the strategy employed is to pick a target sample size per class and randomly undersample any class above that level while oversampling any class below it.
# As an experiment, we will train the model with just the pictures that have
# at least 1000 samples to debug why we are not seeing the neural network learn
# and ignore the sections that have less than that
#SAMPLE_SIZE = 1000
#sections_with_1000 = list(training_count_per_section[training_count_per_section["count"] >= SAMPLE_SIZE]["section"].values)
#balanced_training_data = training_set[training_set["section"].isin(sections_with_1000)].reset_index()
#display(count_rows_by_column(balanced_training_data, "section"))
# https://imbalanced-learn.org/stable/
# Target per-class size: classes above it get undersampled, classes below it
# get oversampled (by duplicating rows), so every class ends at SAMPLE_SIZE
SAMPLE_SIZE = 1500
sections_to_oversample = list(training_count_per_section[training_count_per_section["count"] < SAMPLE_SIZE]["section"].values)
sections_to_undersample = list(training_count_per_section[training_count_per_section["count"] > SAMPLE_SIZE]["section"].values)
# Bring every over-represented class down to exactly SAMPLE_SIZE rows
undersampler = RandomUnderSampler(sampling_strategy={k: SAMPLE_SIZE for k in sections_to_undersample},
                                  random_state=RANDOM_SEED)
# Duplicate rows of under-represented classes up to exactly SAMPLE_SIZE rows
oversampler = RandomOverSampler(sampling_strategy={k: SAMPLE_SIZE for k in sections_to_oversample},
                                random_state=RANDOM_SEED)
def imbsample(imbsampler, df):
    """Apply an imblearn sampler to a Pandas DataFrame.

    imblearn samplers only accept features and labels separately, so the
    "section" column is split off as the label, the sampler is applied, and
    the two pieces are joined back into a single DataFrame.

    :param imbsampler: any object exposing fit_resample(X, y)
    :param df: DataFrame containing a "section" label column
    :returns: the resampled DataFrame
    """
    features = df.drop("section", axis=1)
    labels = df["section"]
    resampled_features, resampled_labels = imbsampler.fit_resample(features, labels)
    return pd.concat([resampled_features, resampled_labels], axis=1)
# Oversample the small classes first, then undersample the large ones,
# so every section ends up with exactly SAMPLE_SIZE rows
balanced_training_data = imbsample(oversampler, training_set)
balanced_training_data = imbsample(undersampler, balanced_training_data)
# Shuffle the data
balanced_training_data = balanced_training_data.sample(frac=1,
                                                       random_state=RANDOM_SEED).reset_index(drop=True)
display(balanced_training_data.head(10))
display(count_rows_by_column(balanced_training_data, "section"))
| species | filesize_mb | image_type | source | full_path | section | |
|---|---|---|---|---|---|---|
| 0 | hirtum | 0.0315 | jpg | gbif | P:/CODE/ITESM/tesis-dataset-downloader/solanum... | lasiocarpa |
| 1 | erianthum | 0.0270 | jpg | idigbio | P:/CODE/ITESM/tesis-dataset-downloader/solanum... | brevantherum |
| 2 | lanceolatum | 0.0311 | jpg | idigbio | P:/CODE/ITESM/tesis-dataset-downloader/solanum... | torva |
| 3 | pubigerum | 0.0234 | jpg | gbif | P:/CODE/ITESM/tesis-dataset-downloader/solanum... | holophylla |
| 4 | nudum | 0.0367 | jpg | gbif | P:/CODE/ITESM/tesis-dataset-downloader/solanum... | holophylla |
| 5 | myriacanthum | 0.0445 | jpg | gbif | P:/CODE/ITESM/tesis-dataset-downloader/solanum... | acanthophora |
| 6 | pubigerum | 0.0309 | jpg | idigbio | P:/CODE/ITESM/tesis-dataset-downloader/solanum... | holophylla |
| 7 | refractum | 0.0360 | jpg | gbif | P:/CODE/ITESM/tesis-dataset-downloader/solanum... | herposolanum |
| 8 | rostratum | 0.0313 | jpg | idigbio | P:/CODE/ITESM/tesis-dataset-downloader/solanum... | melongena |
| 9 | oxycarpum | 0.0430 | jpg | gbif | P:/CODE/ITESM/tesis-dataset-downloader/solanum... | petota |
| section | count | |
|---|---|---|
| 0 | acanthophora | 1500 |
| 1 | anarrhichomenum | 1500 |
| 2 | brevantherum | 1500 |
| 3 | dulcamara | 1500 |
| 4 | herposolanum | 1500 |
| 5 | holophylla | 1500 |
| 6 | lasiocarpa | 1500 |
| 7 | melongena | 1500 |
| 8 | micracantha | 1500 |
| 9 | petota | 1500 |
| 10 | solanum | 1500 |
| 11 | torva | 1500 |
show_images(balanced_training_data,
rows=3,
cols=3,
figsize=(7, 7),
samples_per_label=9)
Output hidden; open in https://colab.research.google.com to view.
TARGET_SIZE = (224, 224)  # input resolution expected by the ImageNet-style backbones (e.g. VGG16)
# BATCH_SIZE = 32
BATCH_SIZE = 16
COLOR_MODE = "rgb"
X_COL = "full_path"  # DataFrame column holding the image path
Y_COL = "section"    # DataFrame column holding the class label
# Keyword arguments shared by every flow_from_dataframe() generator below
image_data_gen_params = {
    "target_size": TARGET_SIZE,
    "color_mode": COLOR_MODE,
    "class_mode": "categorical",
    "seed": RANDOM_SEED,
    "save_prefix": 'augmented_',
    "save_format": 'png',
}
def get_train_generator(preprocessing_func, training_df, batch_size=BATCH_SIZE):
    """Build the augmented training generator from a DataFrame of image paths.

    :param preprocessing_func: the model family's preprocess_input function
        (e.g. tf.keras.applications.vgg16.preprocess_input), or None
    :param training_df: DataFrame with X_COL (paths) and Y_COL (labels)
    :param batch_size: generator batch size
    :returns: a DataFrameIterator yielding (images, one-hot labels)
    """
    # BUG FIX: the original passed both rescale=1/255 AND preprocessing_func.
    # Keras applies preprocessing_function first and rescale afterwards, so an
    # already-normalized batch was divided by 255 again, destroying the input
    # distribution the backbone expects (a likely cause of the flat training
    # loss seen in the logs). Only rescale when no preprocessing function is
    # supplied.
    train_datagen = ImageDataGenerator(
        rescale=None if preprocessing_func else 1.0/255,
        rotation_range=40,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest',
        preprocessing_function=preprocessing_func)
    return train_datagen.flow_from_dataframe(
        training_df,
        x_col=X_COL,
        y_col=Y_COL,
        batch_size=batch_size,
        shuffle=True,
        **image_data_gen_params)
def get_val_generator(df, preprocessing_func, batch_size=BATCH_SIZE):
    """Build a non-shuffling validation/test generator from a DataFrame.

    shuffle=False keeps prediction order aligned with generator.classes,
    which the reporting helpers rely on.

    :param df: DataFrame with X_COL (paths) and Y_COL (labels)
    :param preprocessing_func: the model family's preprocess_input function,
        or None
    :param batch_size: generator batch size
    :returns: a DataFrameIterator yielding (images, one-hot labels)
    """
    # BUG FIX: as in get_train_generator, do not combine rescale=1/255 with a
    # preprocessing function — Keras applies the function first and rescales
    # the already-normalized result, corrupting the inputs.
    test_datagen = ImageDataGenerator(
        rescale=None if preprocessing_func else 1.0/255,
        preprocessing_function=preprocessing_func,
    )
    return test_datagen.flow_from_dataframe(
        df,
        x_col=X_COL,
        y_col=Y_COL,
        shuffle=False,
        batch_size=batch_size,
        **image_data_gen_params)
We will perform 4-fold cross-validation (via repeated shuffle splits) to find the best architecture; with it, we'll train again on all of the training set and then validate on the held-out test set.
CV_FOLDS = 4  # number of train/val resamples used for cross-validation
NUM_CLASSES = len(balanced_training_data["section"].unique())
VAL_SIZE = 0.20  # fraction of the balanced training data held out per fold
TRAINING_RUN_ID = generate_id()  # timestamp ID used to name this run's checkpoints
def train_model(model_to_fit: tf.keras.models.Model,
                fit_params: dict,
                preproc_func):
    """Fit *model_to_fit*, converting the DataFrames in *fit_params* to
    Keras generators first.

    :param model_to_fit: a compiled Keras model
    :param fit_params: kwargs for model.fit(); "x" and "validation_data" must
        be DataFrames and are replaced by generators (the dict is copied,
        never mutated)
    :param preproc_func: preprocessing function passed to the generators
    :returns: the History object returned by model.fit()
    """
    # BUG FIX: time.process_time() counts only this process' CPU time, so it
    # printed a meaningless "started at" value and under-reported training
    # time (GPU work and I/O waits are excluded). Use wall-clock timing and a
    # human-readable start timestamp instead.
    start = time.perf_counter()
    print_marquee(f"Started at {datetime.now().isoformat(timespec='seconds')}...")
    batch_size = fit_params.get("batch_size", BATCH_SIZE)
    # Make a copy of fit_params so as not to edit the original dictionary
    fit_params = fit_params.copy()
    # Transform from DataFrames to File System Generators
    fit_params["x"] = get_train_generator(preproc_func,
                                          fit_params["x"],
                                          batch_size=batch_size)
    fit_params["validation_data"] = get_val_generator(fit_params["validation_data"],
                                                      preproc_func,
                                                      batch_size)
    trained_model_history = model_to_fit.fit(**fit_params)
    elapsed_time = time.perf_counter() - start
    print(f"\n\n ********* Training time: {elapsed_time} s.")
    return trained_model_history
def evaluate_model(trained_model: tf.keras.models.Model,
                   trained_model_history,
                   fit_params: dict,
                   preproc_func,
                   print_loss_graphs: bool=True,
                   print_model_info: bool=True) -> tuple:
    """Evaluate a trained model on the fold's validation DataFrame.

    Prints the model summary, a classification report with confusion
    matrices, and (optionally) the accuracy/loss training curves.

    :param trained_model: the fitted Keras model
    :param trained_model_history: History object returned by model.fit()
    :param fit_params: the same dict passed to train_model(); only
        "validation_data" (a DataFrame) and "batch_size" are read here
    :param preproc_func: preprocessing function for the validation generator
    :param print_loss_graphs: whether to plot accuracy/loss curves
    :param print_model_info: whether to print the model summary
    :returns: (raw prediction probabilities, classification-report dict)
    """
    if print_model_info:
        print_marquee("Model Summary")
        trained_model.summary()
    batch_size = fit_params.get("batch_size", BATCH_SIZE)
    print_marquee("Validation Dataset Confusion Matrix")
    # Re-create the validation generator (shuffle=False) so the prediction
    # order matches val_gen.classes below
    val_gen = get_val_generator(fit_params["validation_data"],
                                preproc_func,
                                batch_size=batch_size)
    val_model_predictions = trained_model.predict(val_gen,
                                                  #steps=val_gen.n // (val_gen.batch_size + 1)
                                                  )
    pred_report = print_dataset_prediction_report(val_model_predictions,
                                                  val_gen.classes,
                                                  get_class_names(val_gen),
                                                  print_cm_as_percentages=True)
    if print_loss_graphs:
        print_marquee("Train/Val Accuracy and Loss graphs")
        # If using early stopping, it might be the case that we used less epochs than
        # requested
        subtitle = f"Epochs: {len(trained_model_history.history['accuracy'])}"
        graph_loss_accuracy(trained_model_history,
                            subtitle=subtitle)
    return val_model_predictions, pred_report
# Helper functions for reporting
def print_dataset_prediction_report(y_pred,
                                    y_real,
                                    labels: list=None,
                                    print_cm_as_percentages: bool=False):
    """Print a classification report and confusion matrix for a prediction run.

    :param y_pred: model output probabilities, one row per sample
    :param y_real: integer class indices of the true labels
    :param labels: class display names ordered by class index
    :param print_cm_as_percentages: also show the confusion matrix normalized
        by the total sample count
    :returns: the classification report as a dict
    """
    # y_pred = np.round(y_pred).astype(int)
    # Collapse per-class probabilities into a single predicted class index
    y_pred = np.argmax(y_pred, axis=1)
    print_marquee("Classification Report")
    class_report_str = classification_report(y_real,
                                             y_pred,
                                             target_names=labels,
                                             digits=4)
    # Second call with output_dict=True so the caller can aggregate metrics
    class_report_dict = classification_report(y_real,
                                              y_pred,
                                              target_names=labels,
                                              output_dict=True)
    print(class_report_str)
    print_marquee("Confusion Matrix")
    cm_data = confusion_matrix(y_real,y_pred)
    cm_display = ConfusionMatrixDisplay(confusion_matrix=cm_data,
                                        display_labels=labels)
    cm_display.plot(xticks_rotation="vertical")
    plt.show()
    if print_cm_as_percentages:
        print_marquee("Confusion Matrix as percentages")
        # NOTE(review): this divides by the grand total, so rows do not sum
        # to 1; per-class recall would need row-wise normalization — confirm
        # this is the intended view
        cm_data_perc = np.round(cm_data/np.sum(cm_data), 2)
        cm_display = ConfusionMatrixDisplay(confusion_matrix=cm_data_perc,
                                            display_labels=labels)
        cm_display.plot(xticks_rotation="vertical")
        plt.show()
    return class_report_dict
def graph_loss_accuracy(h_model,
                        subtitle: str = ""):
    """Plot train/val accuracy and loss curves from a Keras History object.

    :param h_model: History object returned by model.fit()
    :param subtitle: optional text appended to each plot title
    """
    num_records = len(h_model.history["accuracy"])
    epochs = np.arange(0, num_records)
    # BUG FIX: the original wrote `"Title" + f"..." if subtitle else ""`,
    # which parses as `("Title" + f"...") if subtitle else ""`, so with an
    # empty subtitle the entire title disappeared. Parenthesize the suffix.
    suffix = f" ({subtitle})" if subtitle else ""
    plt.style.use("ggplot")
    plt.figure()
    plt.plot(epochs, h_model.history["accuracy"], label="train_acc")
    plt.plot(epochs, h_model.history["val_accuracy"], label="val_acc")
    plt.title("Training and Validation Accuracy" + suffix)
    plt.xlabel("Epoch")
    plt.ylabel("Accuracy")
    plt.legend()
    plt.figure()
    plt.plot(epochs, h_model.history["loss"], label="train_loss")
    plt.plot(epochs, h_model.history["val_loss"], label="val_loss")
    plt.title("Training and Validation Loss" + suffix)
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend()
    plt.show()
def create_model_checkpoint(filepath: str) -> tf.keras.callbacks.ModelCheckpoint:
    """Return a callback that saves the best model (by val_accuracy) to *filepath*."""
    checkpoint_options = dict(
        monitor='val_accuracy',
        verbose=1,
        save_best_only=True,
        save_weights_only=False,
        mode='auto',
        # save_freq = 1,
    )
    return tf.keras.callbacks.ModelCheckpoint(filepath, **checkpoint_options)
def calculate_cv_mean_metrics(cv_val_predictions):
    """Display the mean of every classification metric across all CV folds."""
    print_marquee(f"Mean metrics across {len(cv_val_predictions)} folds")
    flattened = pd.json_normalize(cv_val_predictions)
    mean_frame = flattened.mean().to_frame()
    display(mean_frame)
%%time
#kf = KFold(n_splits=CV_FOLDS,
# random_state=RANDOM_SEED,
# shuffle=True)
# Repeated shuffle splits (not classic KFold): each of the CV_FOLDS rounds
# draws a fresh VAL_SIZE validation slice from the balanced training data
kf = ShuffleSplit(n_splits=CV_FOLDS,
                  test_size=VAL_SIZE,
                  random_state=RANDOM_SEED)
split = 1  # 1-based fold counter, used in the checkpoint filename
vgg16_cv_val_pred = []  # per-fold classification-report dicts
for train_index, val_index in kf.split(balanced_training_data):
    # Fresh, randomly-initialized VGG16 per fold (weights=None = no ImageNet)
    vgg16_tf_model = tf.keras.applications.vgg16.VGG16(
        include_top=True,
        weights=None,
        pooling="avg",
        classes=NUM_CLASSES,
        classifier_activation='softmax',
    )
    # Add an optimizer
    # NOTE(review): lr=0.003 with Adam is high for VGG16 trained from scratch;
    # the logs show loss stuck near ln(12)≈2.48 (random guessing) — consider
    # a much smaller learning rate. The epoch-1 loss of ~409 also suggests an
    # input-scaling problem — confirm the generator preprocessing.
    vgg16_tf_model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.003),
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])
    #vgg16_tf_model.compile(optimizer=tf.optimizers.SGD(learning_rate=0.003, momentum=0.9),
    #                       loss='categorical_crossentropy',
    #                       metrics=['accuracy'])
    # Add a progress bar and save checkpoints
    vgg16_callbacks = [
        create_model_checkpoint(os.path.join(DATA_ROOT_LOCATION, f"vgg16_{TRAINING_RUN_ID}_{split}.h5")),
        tf.keras.callbacks.ProgbarLogger(
            count_mode = 'steps',
            stateful_metrics = None
        ),
        # Stop after 40 epochs without val_accuracy improvement and restore
        # the best weights seen so far
        tf.keras.callbacks.EarlyStopping(
            monitor='val_accuracy',
            min_delta=0,
            patience=40,
            verbose=1,
            mode='auto',
            restore_best_weights=True
        )
    ]
    training_split_data = balanced_training_data.iloc[train_index]
    val_split_data = balanced_training_data.iloc[val_index]
    # "x" and "validation_data" are DataFrames here; train_model converts
    # them into Keras generators before calling fit()
    fit_params = {
        "x": training_split_data,
        "epochs": 105,
        "callbacks": vgg16_callbacks,
        "validation_data": val_split_data,
        "steps_per_epoch": 128,
        "validation_steps": 10,
    }
    preproc_func = tf.keras.applications.vgg16.preprocess_input
    with tf.device(TRAINING_DEVICE_NAME):
        vgg16_training_history = train_model(vgg16_tf_model,
                                             fit_params=fit_params,
                                             preproc_func=preproc_func)
        _, pred_report = evaluate_model(vgg16_tf_model,
                                        vgg16_training_history,
                                        fit_params=fit_params,
                                        preproc_func=preproc_func)
        vgg16_cv_val_pred.append(pred_report)
    split += 1
calculate_cv_mean_metrics(vgg16_cv_val_pred)
***************************
* Started at 10.734375... *
***************************
Found 14400 validated image filenames belonging to 12 classes.
Found 3600 validated image filenames belonging to 12 classes.
Epoch 1/105
128/128 [==============================] - ETA: 0s - loss: 409.0229 - accuracy: 0.0767
Epoch 1: val_accuracy improved from -inf to 0.05625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_20230216171100_1.h5
128/128 [==============================] - 39s 234ms/step - loss: 409.0229 - accuracy: 0.0767 - val_loss: 2.7961 - val_accuracy: 0.0562
Epoch 2/105
128/128 [==============================] - ETA: 0s - loss: 2.5968 - accuracy: 0.0898
Epoch 2: val_accuracy improved from 0.05625 to 0.06250, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_20230216171100_1.h5
128/128 [==============================] - 32s 249ms/step - loss: 2.5968 - accuracy: 0.0898 - val_loss: 2.4935 - val_accuracy: 0.0625
Epoch 3/105
128/128 [==============================] - ETA: 0s - loss: 2.4866 - accuracy: 0.0864
Epoch 3: val_accuracy improved from 0.06250 to 0.08750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_20230216171100_1.h5
128/128 [==============================] - 28s 218ms/step - loss: 2.4866 - accuracy: 0.0864 - val_loss: 2.4908 - val_accuracy: 0.0875
Epoch 4/105
128/128 [==============================] - ETA: 0s - loss: 2.4859 - accuracy: 0.0806
Epoch 4: val_accuracy did not improve from 0.08750
128/128 [==============================] - 13s 102ms/step - loss: 2.4859 - accuracy: 0.0806 - val_loss: 2.4873 - val_accuracy: 0.0875
Epoch 5/105
128/128 [==============================] - ETA: 0s - loss: 2.4860 - accuracy: 0.0781
Epoch 5: val_accuracy improved from 0.08750 to 0.10000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_20230216171100_1.h5
128/128 [==============================] - 35s 276ms/step - loss: 2.4860 - accuracy: 0.0781 - val_loss: 2.4851 - val_accuracy: 0.1000
Epoch 6/105
128/128 [==============================] - ETA: 0s - loss: 2.4852 - accuracy: 0.0771
Epoch 6: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 102ms/step - loss: 2.4852 - accuracy: 0.0771 - val_loss: 2.4871 - val_accuracy: 0.1000
Epoch 7/105
128/128 [==============================] - ETA: 0s - loss: 2.4857 - accuracy: 0.0796
Epoch 7: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 102ms/step - loss: 2.4857 - accuracy: 0.0796 - val_loss: 2.4843 - val_accuracy: 0.0625
Epoch 8/105
128/128 [==============================] - ETA: 0s - loss: 2.4848 - accuracy: 0.0830
Epoch 8: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 102ms/step - loss: 2.4848 - accuracy: 0.0830 - val_loss: 2.4841 - val_accuracy: 0.0875
Epoch 9/105
128/128 [==============================] - ETA: 0s - loss: 2.4857 - accuracy: 0.0825
Epoch 9: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 102ms/step - loss: 2.4857 - accuracy: 0.0825 - val_loss: 2.4842 - val_accuracy: 0.1000
Epoch 10/105
128/128 [==============================] - ETA: 0s - loss: 2.4855 - accuracy: 0.0786
Epoch 10: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 102ms/step - loss: 2.4855 - accuracy: 0.0786 - val_loss: 2.4882 - val_accuracy: 0.0500
Epoch 11/105
128/128 [==============================] - ETA: 0s - loss: 2.4864 - accuracy: 0.0806
Epoch 11: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4864 - accuracy: 0.0806 - val_loss: 2.4875 - val_accuracy: 0.0625
Epoch 12/105
128/128 [==============================] - ETA: 0s - loss: 2.4854 - accuracy: 0.0767
Epoch 12: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4854 - accuracy: 0.0767 - val_loss: 2.4860 - val_accuracy: 0.0688
Epoch 13/105
128/128 [==============================] - ETA: 0s - loss: 2.4853 - accuracy: 0.0781
Epoch 13: val_accuracy improved from 0.10000 to 0.10625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_20230216171100_1.h5
128/128 [==============================] - 27s 211ms/step - loss: 2.4853 - accuracy: 0.0781 - val_loss: 2.4850 - val_accuracy: 0.1063
Epoch 14/105
128/128 [==============================] - ETA: 0s - loss: 2.4849 - accuracy: 0.0830
Epoch 14: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4849 - accuracy: 0.0830 - val_loss: 2.4836 - val_accuracy: 0.1063
Epoch 15/105
128/128 [==============================] - ETA: 0s - loss: 2.4857 - accuracy: 0.0801
Epoch 15: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4857 - accuracy: 0.0801 - val_loss: 2.4856 - val_accuracy: 0.1063
Epoch 16/105
128/128 [==============================] - ETA: 0s - loss: 2.4865 - accuracy: 0.0806
Epoch 16: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4865 - accuracy: 0.0806 - val_loss: 2.4862 - val_accuracy: 0.0625
Epoch 17/105
128/128 [==============================] - ETA: 0s - loss: 2.4853 - accuracy: 0.0859
Epoch 17: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4853 - accuracy: 0.0859 - val_loss: 2.4884 - val_accuracy: 0.0875
Epoch 18/105
128/128 [==============================] - ETA: 0s - loss: 2.4848 - accuracy: 0.0933
Epoch 18: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4848 - accuracy: 0.0933 - val_loss: 2.4852 - val_accuracy: 0.1000
Epoch 19/105
128/128 [==============================] - ETA: 0s - loss: 2.4854 - accuracy: 0.0830
Epoch 19: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4854 - accuracy: 0.0830 - val_loss: 2.4862 - val_accuracy: 0.1063
Epoch 20/105
128/128 [==============================] - ETA: 0s - loss: 2.4847 - accuracy: 0.0845
Epoch 20: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4847 - accuracy: 0.0845 - val_loss: 2.4839 - val_accuracy: 0.1000
Epoch 21/105
128/128 [==============================] - ETA: 0s - loss: 2.4858 - accuracy: 0.0874
Epoch 21: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4858 - accuracy: 0.0874 - val_loss: 2.4861 - val_accuracy: 0.1000
Epoch 22/105
128/128 [==============================] - ETA: 0s - loss: 2.4856 - accuracy: 0.0835
Epoch 22: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4856 - accuracy: 0.0835 - val_loss: 2.4837 - val_accuracy: 0.1063
Epoch 23/105
128/128 [==============================] - ETA: 0s - loss: 2.4859 - accuracy: 0.0874
Epoch 23: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4859 - accuracy: 0.0874 - val_loss: 2.4832 - val_accuracy: 0.1063
Epoch 24/105
128/128 [==============================] - ETA: 0s - loss: 2.4863 - accuracy: 0.0752
Epoch 24: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4863 - accuracy: 0.0752 - val_loss: 2.4864 - val_accuracy: 0.0562
Epoch 25/105
128/128 [==============================] - ETA: 0s - loss: 2.4854 - accuracy: 0.0762
Epoch 25: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4854 - accuracy: 0.0762 - val_loss: 2.4860 - val_accuracy: 0.1000
Epoch 26/105
128/128 [==============================] - ETA: 0s - loss: 2.4859 - accuracy: 0.0801
Epoch 26: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4859 - accuracy: 0.0801 - val_loss: 2.4842 - val_accuracy: 0.0812
Epoch 27/105
128/128 [==============================] - ETA: 0s - loss: 2.4847 - accuracy: 0.0776
Epoch 27: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4847 - accuracy: 0.0776 - val_loss: 2.4870 - val_accuracy: 0.0562
Epoch 28/105
128/128 [==============================] - ETA: 0s - loss: 2.4852 - accuracy: 0.0830
Epoch 28: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4852 - accuracy: 0.0830 - val_loss: 2.4872 - val_accuracy: 0.0875
Epoch 29/105
128/128 [==============================] - ETA: 0s - loss: 2.4853 - accuracy: 0.0879
Epoch 29: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4853 - accuracy: 0.0879 - val_loss: 2.4848 - val_accuracy: 0.0875
Epoch 30/105
128/128 [==============================] - ETA: 0s - loss: 2.4843 - accuracy: 0.0806
Epoch 30: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4843 - accuracy: 0.0806 - val_loss: 2.4878 - val_accuracy: 0.0688
Epoch 31/105
128/128 [==============================] - ETA: 0s - loss: 2.4849 - accuracy: 0.0791
Epoch 31: val_accuracy improved from 0.10625 to 0.11875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_20230216171100_1.h5
128/128 [==============================] - 34s 268ms/step - loss: 2.4849 - accuracy: 0.0791 - val_loss: 2.4867 - val_accuracy: 0.1187
Epoch 32/105
128/128 [==============================] - ETA: 0s - loss: 2.4852 - accuracy: 0.0913
Epoch 32: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 102ms/step - loss: 2.4852 - accuracy: 0.0913 - val_loss: 2.4850 - val_accuracy: 0.0625
Epoch 33/105
128/128 [==============================] - ETA: 0s - loss: 2.4866 - accuracy: 0.0718
Epoch 33: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4866 - accuracy: 0.0718 - val_loss: 2.4848 - val_accuracy: 0.0562
Epoch 34/105
128/128 [==============================] - ETA: 0s - loss: 2.4856 - accuracy: 0.0762
Epoch 34: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4856 - accuracy: 0.0762 - val_loss: 2.4841 - val_accuracy: 0.1187
Epoch 35/105
128/128 [==============================] - ETA: 0s - loss: 2.4856 - accuracy: 0.0835
Epoch 35: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4856 - accuracy: 0.0835 - val_loss: 2.4865 - val_accuracy: 0.1187
Epoch 36/105
128/128 [==============================] - ETA: 0s - loss: 2.4857 - accuracy: 0.0845
Epoch 36: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4857 - accuracy: 0.0845 - val_loss: 2.4863 - val_accuracy: 0.1187
Epoch 37/105
128/128 [==============================] - ETA: 0s - loss: 2.4830 - accuracy: 0.0952
Epoch 37: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4830 - accuracy: 0.0952 - val_loss: 2.4895 - val_accuracy: 0.0625
Epoch 38/105
128/128 [==============================] - ETA: 0s - loss: 2.4839 - accuracy: 0.0957
Epoch 38: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4839 - accuracy: 0.0957 - val_loss: 2.4904 - val_accuracy: 0.0625
Epoch 39/105
128/128 [==============================] - ETA: 0s - loss: 2.4841 - accuracy: 0.0928
Epoch 39: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4841 - accuracy: 0.0928 - val_loss: 2.4901 - val_accuracy: 0.0625
Epoch 40/105
128/128 [==============================] - ETA: 0s - loss: 2.4866 - accuracy: 0.0840
Epoch 40: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4866 - accuracy: 0.0840 - val_loss: 2.4857 - val_accuracy: 0.0625
Epoch 41/105
128/128 [==============================] - ETA: 0s - loss: 2.4859 - accuracy: 0.0933
Epoch 41: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4859 - accuracy: 0.0933 - val_loss: 2.4905 - val_accuracy: 0.0625
Epoch 42/105
128/128 [==============================] - ETA: 0s - loss: 2.4858 - accuracy: 0.0806
Epoch 42: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4858 - accuracy: 0.0806 - val_loss: 2.4891 - val_accuracy: 0.0625
Epoch 43/105
128/128 [==============================] - ETA: 0s - loss: 2.4855 - accuracy: 0.0742
Epoch 43: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4855 - accuracy: 0.0742 - val_loss: 2.4897 - val_accuracy: 0.0812
Epoch 44/105
128/128 [==============================] - ETA: 0s - loss: 2.4865 - accuracy: 0.0757
Epoch 44: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4865 - accuracy: 0.0757 - val_loss: 2.4896 - val_accuracy: 0.0500
Epoch 45/105
128/128 [==============================] - ETA: 0s - loss: 2.4861 - accuracy: 0.0708
Epoch 45: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4861 - accuracy: 0.0708 - val_loss: 2.4869 - val_accuracy: 0.1063
Epoch 46/105
128/128 [==============================] - ETA: 0s - loss: 2.4856 - accuracy: 0.0928
Epoch 46: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4856 - accuracy: 0.0928 - val_loss: 2.4876 - val_accuracy: 0.0688
Epoch 47/105
128/128 [==============================] - ETA: 0s - loss: 2.4862 - accuracy: 0.0762
Epoch 47: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4862 - accuracy: 0.0762 - val_loss: 2.4871 - val_accuracy: 0.0500
Epoch 48/105
128/128 [==============================] - ETA: 0s - loss: 2.4856 - accuracy: 0.0781
Epoch 48: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4856 - accuracy: 0.0781 - val_loss: 2.4865 - val_accuracy: 0.0625
Epoch 49/105
128/128 [==============================] - ETA: 0s - loss: 2.4854 - accuracy: 0.0820
Epoch 49: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4854 - accuracy: 0.0820 - val_loss: 2.4828 - val_accuracy: 0.0625
Epoch 50/105
128/128 [==============================] - ETA: 0s - loss: 2.4853 - accuracy: 0.0952
Epoch 50: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4853 - accuracy: 0.0952 - val_loss: 2.4865 - val_accuracy: 0.0625
Epoch 51/105
128/128 [==============================] - ETA: 0s - loss: 2.4851 - accuracy: 0.0913
Epoch 51: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 102ms/step - loss: 2.4851 - accuracy: 0.0913 - val_loss: 2.4912 - val_accuracy: 0.0500
Epoch 52/105
128/128 [==============================] - ETA: 0s - loss: 2.4854 - accuracy: 0.0879
Epoch 52: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4854 - accuracy: 0.0879 - val_loss: 2.4917 - val_accuracy: 0.0625
Epoch 53/105
128/128 [==============================] - ETA: 0s - loss: 2.4851 - accuracy: 0.0933
Epoch 53: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4851 - accuracy: 0.0933 - val_loss: 2.4888 - val_accuracy: 0.0625
Epoch 54/105
128/128 [==============================] - ETA: 0s - loss: 2.4864 - accuracy: 0.0859
Epoch 54: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4864 - accuracy: 0.0859 - val_loss: 2.4885 - val_accuracy: 0.0625
Epoch 55/105
128/128 [==============================] - ETA: 0s - loss: 2.4844 - accuracy: 0.0879
Epoch 55: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4844 - accuracy: 0.0879 - val_loss: 2.4895 - val_accuracy: 0.0625
Epoch 56/105
128/128 [==============================] - ETA: 0s - loss: 2.4861 - accuracy: 0.0913
Epoch 56: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4861 - accuracy: 0.0913 - val_loss: 2.4865 - val_accuracy: 0.0625
Epoch 57/105
128/128 [==============================] - ETA: 0s - loss: 2.4860 - accuracy: 0.0674
Epoch 57: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4860 - accuracy: 0.0674 - val_loss: 2.4864 - val_accuracy: 0.0812
Epoch 58/105
128/128 [==============================] - ETA: 0s - loss: 2.4860 - accuracy: 0.0742
Epoch 58: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4860 - accuracy: 0.0742 - val_loss: 2.4860 - val_accuracy: 0.1000
Epoch 59/105
128/128 [==============================] - ETA: 0s - loss: 2.4863 - accuracy: 0.0767
Epoch 59: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4863 - accuracy: 0.0767 - val_loss: 2.4875 - val_accuracy: 0.0500
Epoch 60/105
128/128 [==============================] - ETA: 0s - loss: 2.4847 - accuracy: 0.0864
Epoch 60: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 102ms/step - loss: 2.4847 - accuracy: 0.0864 - val_loss: 2.4876 - val_accuracy: 0.0500
Epoch 61/105
128/128 [==============================] - ETA: 0s - loss: 2.4857 - accuracy: 0.0762
Epoch 61: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4857 - accuracy: 0.0762 - val_loss: 2.4861 - val_accuracy: 0.1000
Epoch 62/105
128/128 [==============================] - ETA: 0s - loss: 2.4853 - accuracy: 0.0781
Epoch 62: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4853 - accuracy: 0.0781 - val_loss: 2.4859 - val_accuracy: 0.1000
Epoch 63/105
128/128 [==============================] - ETA: 0s - loss: 2.4865 - accuracy: 0.0742
Epoch 63: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4865 - accuracy: 0.0742 - val_loss: 2.4879 - val_accuracy: 0.0500
Epoch 64/105
128/128 [==============================] - ETA: 0s - loss: 2.4850 - accuracy: 0.0835
Epoch 64: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4850 - accuracy: 0.0835 - val_loss: 2.4861 - val_accuracy: 0.0875
Epoch 65/105
128/128 [==============================] - ETA: 0s - loss: 2.4862 - accuracy: 0.0825
Epoch 65: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4862 - accuracy: 0.0825 - val_loss: 2.4869 - val_accuracy: 0.0625
Epoch 66/105
128/128 [==============================] - ETA: 0s - loss: 2.4848 - accuracy: 0.0972
Epoch 66: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4848 - accuracy: 0.0972 - val_loss: 2.4862 - val_accuracy: 0.0625
Epoch 67/105
128/128 [==============================] - ETA: 0s - loss: 2.4858 - accuracy: 0.0874
Epoch 67: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4858 - accuracy: 0.0874 - val_loss: 2.4869 - val_accuracy: 0.0625
Epoch 68/105
128/128 [==============================] - ETA: 0s - loss: 2.4850 - accuracy: 0.0869
Epoch 68: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4850 - accuracy: 0.0869 - val_loss: 2.4880 - val_accuracy: 0.0688
Epoch 69/105
128/128 [==============================] - ETA: 0s - loss: 2.4862 - accuracy: 0.0781
Epoch 69: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4862 - accuracy: 0.0781 - val_loss: 2.4870 - val_accuracy: 0.0625
Epoch 70/105
128/128 [==============================] - ETA: 0s - loss: 2.4858 - accuracy: 0.0791
Epoch 70: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4858 - accuracy: 0.0791 - val_loss: 2.4853 - val_accuracy: 0.0625
Epoch 71/105
128/128 [==============================] - ETA: 0s - loss: 2.4862 - accuracy: 0.0684
Epoch 71: val_accuracy did not improve from 0.11875
128/128 [==============================] - 13s 103ms/step - loss: 2.4862 - accuracy: 0.0684 - val_loss: 2.4858 - val_accuracy: 0.1063
Restoring model weights from the end of the best epoch: 31.
Epoch 71: early stopping
********* Training time: 2052.109375 s.
*****************
* Model Summary *
*****************
Model: "vgg16"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 224, 224, 3)] 0
block1_conv1 (Conv2D) (None, 224, 224, 64) 1792
block1_conv2 (Conv2D) (None, 224, 224, 64) 36928
block1_pool (MaxPooling2D) (None, 112, 112, 64) 0
block2_conv1 (Conv2D) (None, 112, 112, 128) 73856
block2_conv2 (Conv2D) (None, 112, 112, 128) 147584
block2_pool (MaxPooling2D) (None, 56, 56, 128) 0
block3_conv1 (Conv2D) (None, 56, 56, 256) 295168
block3_conv2 (Conv2D) (None, 56, 56, 256) 590080
block3_conv3 (Conv2D) (None, 56, 56, 256) 590080
block3_pool (MaxPooling2D) (None, 28, 28, 256) 0
block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160
block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808
block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808
block4_pool (MaxPooling2D) (None, 14, 14, 512) 0
block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808
block5_pool (MaxPooling2D) (None, 7, 7, 512) 0
flatten (Flatten) (None, 25088) 0
fc1 (Dense) (None, 4096) 102764544
fc2 (Dense) (None, 4096) 16781312
predictions (Dense) (None, 12) 49164
=================================================================
Total params: 134,309,708
Trainable params: 134,309,708
Non-trainable params: 0
_________________________________________________________________
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 3600 validated image filenames belonging to 12 classes.
225/225 [==============================] - 6s 27ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.0000 0.0000 0.0000 305
anarrhichomenum 0.0000 0.0000 0.0000 322
brevantherum 0.0000 0.0000 0.0000 266
dulcamara 0.0000 0.0000 0.0000 311
herposolanum 0.0000 0.0000 0.0000 279
holophylla 0.0000 0.0000 0.0000 282
lasiocarpa 0.0869 1.0000 0.1600 313
melongena 0.0000 0.0000 0.0000 314
micracantha 0.0000 0.0000 0.0000 299
petota 0.0000 0.0000 0.0000 300
solanum 0.0000 0.0000 0.0000 304
torva 0.0000 0.0000 0.0000 305
accuracy 0.0869 3600
macro avg 0.0072 0.0833 0.0133 3600
weighted avg 0.0076 0.0869 0.0139 3600
********************
* Confusion Matrix *
********************
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. 
Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))
************************************** * Train/Val Accuracy and Loss graphs * **************************************
****************************
* Started at 2074.59375... *
****************************
Found 14400 validated image filenames belonging to 12 classes.
Found 3600 validated image filenames belonging to 12 classes.
Epoch 1/105
128/128 [==============================] - ETA: 0s - loss: 140.9845 - accuracy: 0.0869
Epoch 1: val_accuracy improved from -inf to 0.08125, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_20230216171100_2.h5
128/128 [==============================] - 27s 207ms/step - loss: 140.9845 - accuracy: 0.0869 - val_loss: 2.4867 - val_accuracy: 0.0812
Epoch 2/105
128/128 [==============================] - ETA: 0s - loss: 2.4871 - accuracy: 0.0884
Epoch 2: val_accuracy did not improve from 0.08125
128/128 [==============================] - 13s 102ms/step - loss: 2.4871 - accuracy: 0.0884 - val_loss: 2.4844 - val_accuracy: 0.0812
Epoch 3/105
128/128 [==============================] - ETA: 0s - loss: 2.4860 - accuracy: 0.0811
Epoch 3: val_accuracy improved from 0.08125 to 0.10000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_20230216171100_2.h5
128/128 [==============================] - 27s 212ms/step - loss: 2.4860 - accuracy: 0.0811 - val_loss: 2.4847 - val_accuracy: 0.1000
Epoch 4/105
128/128 [==============================] - ETA: 0s - loss: 2.4858 - accuracy: 0.0801
Epoch 4: val_accuracy did not improve from 0.10000
128/128 [==============================] - 14s 102ms/step - loss: 2.4858 - accuracy: 0.0801 - val_loss: 2.4855 - val_accuracy: 0.0688
Epoch 5/105
128/128 [==============================] - ETA: 0s - loss: 2.4858 - accuracy: 0.0801
Epoch 5: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 102ms/step - loss: 2.4858 - accuracy: 0.0801 - val_loss: 2.4858 - val_accuracy: 0.0812
Epoch 6/105
128/128 [==============================] - ETA: 0s - loss: 2.4853 - accuracy: 0.0898
Epoch 6: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 102ms/step - loss: 2.4853 - accuracy: 0.0898 - val_loss: 2.4859 - val_accuracy: 0.0812
Epoch 7/105
128/128 [==============================] - ETA: 0s - loss: 2.4856 - accuracy: 0.0825
Epoch 7: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4856 - accuracy: 0.0825 - val_loss: 2.4861 - val_accuracy: 0.0812
Epoch 8/105
128/128 [==============================] - ETA: 0s - loss: 2.4858 - accuracy: 0.0840
Epoch 8: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4858 - accuracy: 0.0840 - val_loss: 2.4867 - val_accuracy: 0.0812
Epoch 9/105
128/128 [==============================] - ETA: 0s - loss: 2.4857 - accuracy: 0.0874
Epoch 9: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 102ms/step - loss: 2.4857 - accuracy: 0.0874 - val_loss: 2.4854 - val_accuracy: 0.0812
Epoch 10/105
128/128 [==============================] - ETA: 0s - loss: 2.4856 - accuracy: 0.0845
Epoch 10: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4856 - accuracy: 0.0845 - val_loss: 2.4842 - val_accuracy: 0.0812
Epoch 11/105
128/128 [==============================] - ETA: 0s - loss: 2.4844 - accuracy: 0.0806
Epoch 11: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4844 - accuracy: 0.0806 - val_loss: 2.4842 - val_accuracy: 0.0812
Epoch 12/105
128/128 [==============================] - ETA: 0s - loss: 2.4853 - accuracy: 0.0864
Epoch 12: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4853 - accuracy: 0.0864 - val_loss: 2.4837 - val_accuracy: 0.0812
Epoch 13/105
128/128 [==============================] - ETA: 0s - loss: 2.4848 - accuracy: 0.0835
Epoch 13: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4848 - accuracy: 0.0835 - val_loss: 2.4836 - val_accuracy: 0.0812
Epoch 14/105
128/128 [==============================] - ETA: 0s - loss: 2.4857 - accuracy: 0.0845
Epoch 14: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4857 - accuracy: 0.0845 - val_loss: 2.4832 - val_accuracy: 0.0812
Epoch 15/105
128/128 [==============================] - ETA: 0s - loss: 2.4852 - accuracy: 0.0889
Epoch 15: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4852 - accuracy: 0.0889 - val_loss: 2.4849 - val_accuracy: 0.0812
Epoch 16/105
128/128 [==============================] - ETA: 0s - loss: 2.4859 - accuracy: 0.0796
Epoch 16: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4859 - accuracy: 0.0796 - val_loss: 2.4856 - val_accuracy: 0.0750
Epoch 17/105
128/128 [==============================] - ETA: 0s - loss: 2.4857 - accuracy: 0.0791
Epoch 17: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4857 - accuracy: 0.0791 - val_loss: 2.4851 - val_accuracy: 0.0812
Epoch 18/105
128/128 [==============================] - ETA: 0s - loss: 2.4858 - accuracy: 0.0757
Epoch 18: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4858 - accuracy: 0.0757 - val_loss: 2.4854 - val_accuracy: 0.0625
Epoch 19/105
128/128 [==============================] - ETA: 0s - loss: 2.4851 - accuracy: 0.0747
Epoch 19: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4851 - accuracy: 0.0747 - val_loss: 2.4855 - val_accuracy: 0.0812
Epoch 20/105
128/128 [==============================] - ETA: 0s - loss: 2.4863 - accuracy: 0.0801
Epoch 20: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4863 - accuracy: 0.0801 - val_loss: 2.4864 - val_accuracy: 0.0688
Epoch 21/105
128/128 [==============================] - ETA: 0s - loss: 2.4853 - accuracy: 0.0864
Epoch 21: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4853 - accuracy: 0.0864 - val_loss: 2.4886 - val_accuracy: 0.0688
Epoch 22/105
128/128 [==============================] - ETA: 0s - loss: 2.4856 - accuracy: 0.0825
Epoch 22: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4856 - accuracy: 0.0825 - val_loss: 2.4874 - val_accuracy: 0.0625
Epoch 23/105
128/128 [==============================] - ETA: 0s - loss: 2.4856 - accuracy: 0.0786
Epoch 23: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4856 - accuracy: 0.0786 - val_loss: 2.4868 - val_accuracy: 0.0750
Epoch 24/105
128/128 [==============================] - ETA: 0s - loss: 2.4845 - accuracy: 0.0776
Epoch 24: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4845 - accuracy: 0.0776 - val_loss: 2.4860 - val_accuracy: 0.0812
Epoch 25/105
128/128 [==============================] - ETA: 0s - loss: 2.4865 - accuracy: 0.0835
Epoch 25: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 102ms/step - loss: 2.4865 - accuracy: 0.0835 - val_loss: 2.4846 - val_accuracy: 0.1000
Epoch 26/105
128/128 [==============================] - ETA: 0s - loss: 2.4865 - accuracy: 0.0781
Epoch 26: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 102ms/step - loss: 2.4865 - accuracy: 0.0781 - val_loss: 2.4849 - val_accuracy: 0.1000
Epoch 27/105
128/128 [==============================] - ETA: 0s - loss: 2.4855 - accuracy: 0.0825
Epoch 27: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4855 - accuracy: 0.0825 - val_loss: 2.4857 - val_accuracy: 0.0750
Epoch 28/105
128/128 [==============================] - ETA: 0s - loss: 2.4854 - accuracy: 0.0908
Epoch 28: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 102ms/step - loss: 2.4854 - accuracy: 0.0908 - val_loss: 2.4867 - val_accuracy: 0.0812
Epoch 29/105
128/128 [==============================] - ETA: 0s - loss: 2.4854 - accuracy: 0.0776
Epoch 29: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 102ms/step - loss: 2.4854 - accuracy: 0.0776 - val_loss: 2.4876 - val_accuracy: 0.0812
Epoch 30/105
128/128 [==============================] - ETA: 0s - loss: 2.4848 - accuracy: 0.0928
Epoch 30: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 102ms/step - loss: 2.4848 - accuracy: 0.0928 - val_loss: 2.4871 - val_accuracy: 0.0812
Epoch 31/105
128/128 [==============================] - ETA: 0s - loss: 2.4843 - accuracy: 0.0903
Epoch 31: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4843 - accuracy: 0.0903 - val_loss: 2.4880 - val_accuracy: 0.0750
Epoch 32/105
128/128 [==============================] - ETA: 0s - loss: 2.4870 - accuracy: 0.0825
Epoch 32: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 102ms/step - loss: 2.4870 - accuracy: 0.0825 - val_loss: 2.4868 - val_accuracy: 0.0750
Epoch 33/105
128/128 [==============================] - ETA: 0s - loss: 2.4856 - accuracy: 0.0835
Epoch 33: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4856 - accuracy: 0.0835 - val_loss: 2.4868 - val_accuracy: 0.0812
Epoch 34/105
128/128 [==============================] - ETA: 0s - loss: 2.4854 - accuracy: 0.0869
Epoch 34: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4854 - accuracy: 0.0869 - val_loss: 2.4882 - val_accuracy: 0.0812
Epoch 35/105
128/128 [==============================] - ETA: 0s - loss: 2.4865 - accuracy: 0.0859
Epoch 35: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4865 - accuracy: 0.0859 - val_loss: 2.4863 - val_accuracy: 0.0812
Epoch 36/105
128/128 [==============================] - ETA: 0s - loss: 2.4856 - accuracy: 0.0825
Epoch 36: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 104ms/step - loss: 2.4856 - accuracy: 0.0825 - val_loss: 2.4862 - val_accuracy: 0.0812
Epoch 37/105
128/128 [==============================] - ETA: 0s - loss: 2.4848 - accuracy: 0.0825
Epoch 37: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4848 - accuracy: 0.0825 - val_loss: 2.4885 - val_accuracy: 0.0812
Epoch 38/105
128/128 [==============================] - ETA: 0s - loss: 2.4853 - accuracy: 0.0820
Epoch 38: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4853 - accuracy: 0.0820 - val_loss: 2.4870 - val_accuracy: 0.0812
Epoch 39/105
128/128 [==============================] - ETA: 0s - loss: 2.4863 - accuracy: 0.0757
Epoch 39: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4863 - accuracy: 0.0757 - val_loss: 2.4869 - val_accuracy: 0.0812
Epoch 40/105
128/128 [==============================] - ETA: 0s - loss: 2.4861 - accuracy: 0.0747
Epoch 40: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4861 - accuracy: 0.0747 - val_loss: 2.4853 - val_accuracy: 0.0688
Epoch 41/105
128/128 [==============================] - ETA: 0s - loss: 2.4858 - accuracy: 0.0815
Epoch 41: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4858 - accuracy: 0.0815 - val_loss: 2.4836 - val_accuracy: 0.1000
Epoch 42/105
128/128 [==============================] - ETA: 0s - loss: 2.4847 - accuracy: 0.0898
Epoch 42: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4847 - accuracy: 0.0898 - val_loss: 2.4836 - val_accuracy: 0.0812
Epoch 43/105
128/128 [==============================] - ETA: 0s - loss: 2.4859 - accuracy: 0.0771
Epoch 43: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 103ms/step - loss: 2.4859 - accuracy: 0.0771 - val_loss: 2.4861 - val_accuracy: 0.0812
Restoring model weights from the end of the best epoch: 3.
Epoch 43: early stopping
********* Training time: 1237.046875 s.
*****************
* Model Summary *
*****************
Model: "vgg16"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, 224, 224, 3)] 0
block1_conv1 (Conv2D) (None, 224, 224, 64) 1792
block1_conv2 (Conv2D) (None, 224, 224, 64) 36928
block1_pool (MaxPooling2D) (None, 112, 112, 64) 0
block2_conv1 (Conv2D) (None, 112, 112, 128) 73856
block2_conv2 (Conv2D) (None, 112, 112, 128) 147584
block2_pool (MaxPooling2D) (None, 56, 56, 128) 0
block3_conv1 (Conv2D) (None, 56, 56, 256) 295168
block3_conv2 (Conv2D) (None, 56, 56, 256) 590080
block3_conv3 (Conv2D) (None, 56, 56, 256) 590080
block3_pool (MaxPooling2D) (None, 28, 28, 256) 0
block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160
block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808
block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808
block4_pool (MaxPooling2D) (None, 14, 14, 512) 0
block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808
block5_pool (MaxPooling2D) (None, 7, 7, 512) 0
flatten (Flatten) (None, 25088) 0
fc1 (Dense) (None, 4096) 102764544
fc2 (Dense) (None, 4096) 16781312
predictions (Dense) (None, 12) 49164
=================================================================
Total params: 134,309,708
Trainable params: 134,309,708
Non-trainable params: 0
_________________________________________________________________
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 3600 validated image filenames belonging to 12 classes.
225/225 [==============================] - 6s 25ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.0000 0.0000 0.0000 277
anarrhichomenum 0.0000 0.0000 0.0000 315
brevantherum 0.0000 0.0000 0.0000 307
dulcamara 0.0825 1.0000 0.1524 297
herposolanum 0.0000 0.0000 0.0000 288
holophylla 0.0000 0.0000 0.0000 286
lasiocarpa 0.0000 0.0000 0.0000 314
melongena 0.0000 0.0000 0.0000 291
micracantha 0.0000 0.0000 0.0000 292
petota 0.0000 0.0000 0.0000 328
solanum 0.0000 0.0000 0.0000 304
torva 0.0000 0.0000 0.0000 301
accuracy 0.0825 3600
macro avg 0.0069 0.0833 0.0127 3600
weighted avg 0.0068 0.0825 0.0126 3600
********************
* Confusion Matrix *
********************
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
  _warn_prf(average, modifier, msg_start, len(result))
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
  _warn_prf(average, modifier, msg_start, len(result))
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
  _warn_prf(average, modifier, msg_start, len(result))
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
  _warn_prf(average, modifier, msg_start, len(result))
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
  _warn_prf(average, modifier, msg_start, len(result))
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
  _warn_prf(average, modifier, msg_start, len(result))
**************************************
* Train/Val Accuracy and Loss graphs *
**************************************
*****************************
* Started at 3323.390625... *
*****************************
Found 14400 validated image filenames belonging to 12 classes.
Found 3600 validated image filenames belonging to 12 classes.
Epoch 1/105
128/128 [==============================] - ETA: 0s - loss: 76.3238 - accuracy: 0.0898
Epoch 1: val_accuracy improved from -inf to 0.05000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_20230216171100_3.h5
128/128 [==============================] - 28s 213ms/step - loss: 76.3238 - accuracy: 0.0898 - val_loss: 2.4914 - val_accuracy: 0.0500
Epoch 2/105
128/128 [==============================] - ETA: 0s - loss: 2.4865 - accuracy: 0.0752
Epoch 2: val_accuracy improved from 0.05000 to 0.07500, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_20230216171100_3.h5
128/128 [==============================] - 35s 273ms/step - loss: 2.4865 - accuracy: 0.0752 - val_loss: 2.4854 - val_accuracy: 0.0750
Epoch 3/105
128/128 [==============================] - ETA: 0s - loss: 2.4844 - accuracy: 0.0845
Epoch 3: val_accuracy improved from 0.07500 to 0.11250, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_20230216171100_3.h5
128/128 [==============================] - 33s 256ms/step - loss: 2.4844 - accuracy: 0.0845 - val_loss: 2.4889 - val_accuracy: 0.1125
Epoch 4/105
128/128 [==============================] - ETA: 0s - loss: 2.4857 - accuracy: 0.0786
Epoch 4: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 101ms/step - loss: 2.4857 - accuracy: 0.0786 - val_loss: 2.4866 - val_accuracy: 0.0625
Epoch 5/105
128/128 [==============================] - ETA: 0s - loss: 2.4854 - accuracy: 0.0747
Epoch 5: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 101ms/step - loss: 2.4854 - accuracy: 0.0747 - val_loss: 2.4847 - val_accuracy: 0.1000
Epoch 6/105
128/128 [==============================] - ETA: 0s - loss: 2.4857 - accuracy: 0.0752
Epoch 6: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 101ms/step - loss: 2.4857 - accuracy: 0.0752 - val_loss: 2.4835 - val_accuracy: 0.0875
Epoch 7/105
128/128 [==============================] - ETA: 0s - loss: 2.4845 - accuracy: 0.0869
Epoch 7: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 101ms/step - loss: 2.4845 - accuracy: 0.0869 - val_loss: 2.4791 - val_accuracy: 0.1000
Epoch 8/105
128/128 [==============================] - ETA: 0s - loss: 2.4864 - accuracy: 0.0747
Epoch 8: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 102ms/step - loss: 2.4864 - accuracy: 0.0747 - val_loss: 2.4824 - val_accuracy: 0.0750
Epoch 9/105
128/128 [==============================] - ETA: 0s - loss: 2.4856 - accuracy: 0.0874
Epoch 9: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 101ms/step - loss: 2.4856 - accuracy: 0.0874 - val_loss: 2.4872 - val_accuracy: 0.0688
Epoch 10/105
128/128 [==============================] - ETA: 0s - loss: 2.4859 - accuracy: 0.0840
Epoch 10: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 101ms/step - loss: 2.4859 - accuracy: 0.0840 - val_loss: 2.4873 - val_accuracy: 0.0688
Epoch 11/105
128/128 [==============================] - ETA: 0s - loss: 2.4843 - accuracy: 0.0952
Epoch 11: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 101ms/step - loss: 2.4843 - accuracy: 0.0952 - val_loss: 2.4862 - val_accuracy: 0.0812
Epoch 12/105
128/128 [==============================] - ETA: 0s - loss: 2.4853 - accuracy: 0.0879
Epoch 12: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 101ms/step - loss: 2.4853 - accuracy: 0.0879 - val_loss: 2.4894 - val_accuracy: 0.0812
Epoch 13/105
128/128 [==============================] - ETA: 0s - loss: 2.4856 - accuracy: 0.0791
Epoch 13: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 101ms/step - loss: 2.4856 - accuracy: 0.0791 - val_loss: 2.4853 - val_accuracy: 0.1125
Epoch 14/105
128/128 [==============================] - ETA: 0s - loss: 2.4861 - accuracy: 0.0776
Epoch 14: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 102ms/step - loss: 2.4861 - accuracy: 0.0776 - val_loss: 2.4877 - val_accuracy: 0.1000
Epoch 15/105
128/128 [==============================] - ETA: 0s - loss: 2.4854 - accuracy: 0.0894
Epoch 15: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 102ms/step - loss: 2.4854 - accuracy: 0.0894 - val_loss: 2.4882 - val_accuracy: 0.0688
Epoch 16/105
128/128 [==============================] - ETA: 0s - loss: 2.4860 - accuracy: 0.0742
Epoch 16: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 102ms/step - loss: 2.4860 - accuracy: 0.0742 - val_loss: 2.4895 - val_accuracy: 0.0625
Epoch 17/105
128/128 [==============================] - ETA: 0s - loss: 2.4849 - accuracy: 0.0859
Epoch 17: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 101ms/step - loss: 2.4849 - accuracy: 0.0859 - val_loss: 2.4913 - val_accuracy: 0.0750
Epoch 18/105
128/128 [==============================] - ETA: 0s - loss: 2.4846 - accuracy: 0.0801
Epoch 18: val_accuracy did not improve from 0.11250
128/128 [==============================] - 14s 106ms/step - loss: 2.4846 - accuracy: 0.0801 - val_loss: 2.4935 - val_accuracy: 0.0500
Epoch 19/105
128/128 [==============================] - ETA: 0s - loss: 2.4862 - accuracy: 0.0781
Epoch 19: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 102ms/step - loss: 2.4862 - accuracy: 0.0781 - val_loss: 2.4894 - val_accuracy: 0.0750
Epoch 20/105
128/128 [==============================] - ETA: 0s - loss: 2.4850 - accuracy: 0.0845
Epoch 20: val_accuracy did not improve from 0.11250
128/128 [==============================] - 14s 111ms/step - loss: 2.4850 - accuracy: 0.0845 - val_loss: 2.4888 - val_accuracy: 0.1000
Epoch 21/105
128/128 [==============================] - ETA: 0s - loss: 2.4864 - accuracy: 0.0820
Epoch 21: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 101ms/step - loss: 2.4864 - accuracy: 0.0820 - val_loss: 2.4869 - val_accuracy: 0.0812
Epoch 22/105
128/128 [==============================] - ETA: 0s - loss: 2.4860 - accuracy: 0.0742
Epoch 22: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 103ms/step - loss: 2.4860 - accuracy: 0.0742 - val_loss: 2.4883 - val_accuracy: 0.0688
Epoch 23/105
128/128 [==============================] - ETA: 0s - loss: 2.4849 - accuracy: 0.0903
Epoch 23: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 103ms/step - loss: 2.4849 - accuracy: 0.0903 - val_loss: 2.4889 - val_accuracy: 0.0688
Epoch 24/105
128/128 [==============================] - ETA: 0s - loss: 2.4840 - accuracy: 0.0894
Epoch 24: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 102ms/step - loss: 2.4840 - accuracy: 0.0894 - val_loss: 2.4881 - val_accuracy: 0.1000
Epoch 25/105
128/128 [==============================] - ETA: 0s - loss: 2.4865 - accuracy: 0.0874
Epoch 25: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 103ms/step - loss: 2.4865 - accuracy: 0.0874 - val_loss: 2.4868 - val_accuracy: 0.1000
Epoch 26/105
128/128 [==============================] - ETA: 0s - loss: 2.4860 - accuracy: 0.0723
Epoch 26: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 103ms/step - loss: 2.4860 - accuracy: 0.0723 - val_loss: 2.4869 - val_accuracy: 0.1000
Epoch 27/105
128/128 [==============================] - ETA: 0s - loss: 2.4843 - accuracy: 0.0938
Epoch 27: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 105ms/step - loss: 2.4843 - accuracy: 0.0938 - val_loss: 2.4847 - val_accuracy: 0.1125
Epoch 28/105
128/128 [==============================] - ETA: 0s - loss: 2.4859 - accuracy: 0.0781
Epoch 28: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 103ms/step - loss: 2.4859 - accuracy: 0.0781 - val_loss: 2.4822 - val_accuracy: 0.1125
Epoch 29/105
128/128 [==============================] - ETA: 0s - loss: 2.4857 - accuracy: 0.0962
Epoch 29: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 102ms/step - loss: 2.4857 - accuracy: 0.0962 - val_loss: 2.4835 - val_accuracy: 0.1000
Epoch 30/105
128/128 [==============================] - ETA: 0s - loss: 2.4857 - accuracy: 0.0815
Epoch 30: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 103ms/step - loss: 2.4857 - accuracy: 0.0815 - val_loss: 2.4858 - val_accuracy: 0.1000
Epoch 31/105
128/128 [==============================] - ETA: 0s - loss: 2.4860 - accuracy: 0.0732
Epoch 31: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 102ms/step - loss: 2.4860 - accuracy: 0.0732 - val_loss: 2.4895 - val_accuracy: 0.0688
Epoch 32/105
128/128 [==============================] - ETA: 0s - loss: 2.4864 - accuracy: 0.0869
Epoch 32: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 103ms/step - loss: 2.4864 - accuracy: 0.0869 - val_loss: 2.4846 - val_accuracy: 0.0875
Epoch 33/105
128/128 [==============================] - ETA: 0s - loss: 2.4855 - accuracy: 0.0869
Epoch 33: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 103ms/step - loss: 2.4855 - accuracy: 0.0869 - val_loss: 2.4850 - val_accuracy: 0.0688
Epoch 34/105
128/128 [==============================] - ETA: 0s - loss: 2.4850 - accuracy: 0.0864
Epoch 34: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 103ms/step - loss: 2.4850 - accuracy: 0.0864 - val_loss: 2.4897 - val_accuracy: 0.0688
Epoch 35/105
128/128 [==============================] - ETA: 0s - loss: 2.4841 - accuracy: 0.0874
Epoch 35: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 102ms/step - loss: 2.4841 - accuracy: 0.0874 - val_loss: 2.4934 - val_accuracy: 0.0625
Epoch 36/105
128/128 [==============================] - ETA: 0s - loss: 2.4849 - accuracy: 0.0850
Epoch 36: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 103ms/step - loss: 2.4849 - accuracy: 0.0850 - val_loss: 2.4923 - val_accuracy: 0.0688
Epoch 37/105
128/128 [==============================] - ETA: 0s - loss: 2.4847 - accuracy: 0.0923
Epoch 37: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 103ms/step - loss: 2.4847 - accuracy: 0.0923 - val_loss: 2.4904 - val_accuracy: 0.0688
Epoch 38/105
128/128 [==============================] - ETA: 0s - loss: 2.4865 - accuracy: 0.0801
Epoch 38: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 102ms/step - loss: 2.4865 - accuracy: 0.0801 - val_loss: 2.4874 - val_accuracy: 0.0688
Epoch 39/105
128/128 [==============================] - ETA: 0s - loss: 2.4854 - accuracy: 0.0884
Epoch 39: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 102ms/step - loss: 2.4854 - accuracy: 0.0884 - val_loss: 2.4877 - val_accuracy: 0.0688
Epoch 40/105
128/128 [==============================] - ETA: 0s - loss: 2.4856 - accuracy: 0.0854
Epoch 40: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 103ms/step - loss: 2.4856 - accuracy: 0.0854 - val_loss: 2.4878 - val_accuracy: 0.0688
Epoch 41/105
128/128 [==============================] - ETA: 0s - loss: 2.4855 - accuracy: 0.0757
Epoch 41: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 102ms/step - loss: 2.4855 - accuracy: 0.0757 - val_loss: 2.4883 - val_accuracy: 0.0625
Epoch 42/105
128/128 [==============================] - ETA: 0s - loss: 2.4857 - accuracy: 0.0781
Epoch 42: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 103ms/step - loss: 2.4857 - accuracy: 0.0781 - val_loss: 2.4865 - val_accuracy: 0.0812
Epoch 43/105
128/128 [==============================] - ETA: 0s - loss: 2.4860 - accuracy: 0.0767
Epoch 43: val_accuracy did not improve from 0.11250
128/128 [==============================] - 13s 103ms/step - loss: 2.4860 - accuracy: 0.0767 - val_loss: 2.4852 - val_accuracy: 0.0812
Restoring model weights from the end of the best epoch: 3.
Epoch 43: early stopping
********* Training time: 1226.921875 s.
*****************
* Model Summary *
*****************
Model: "vgg16"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_3 (InputLayer) [(None, 224, 224, 3)] 0
block1_conv1 (Conv2D) (None, 224, 224, 64) 1792
block1_conv2 (Conv2D) (None, 224, 224, 64) 36928
block1_pool (MaxPooling2D) (None, 112, 112, 64) 0
block2_conv1 (Conv2D) (None, 112, 112, 128) 73856
block2_conv2 (Conv2D) (None, 112, 112, 128) 147584
block2_pool (MaxPooling2D) (None, 56, 56, 128) 0
block3_conv1 (Conv2D) (None, 56, 56, 256) 295168
block3_conv2 (Conv2D) (None, 56, 56, 256) 590080
block3_conv3 (Conv2D) (None, 56, 56, 256) 590080
block3_pool (MaxPooling2D) (None, 28, 28, 256) 0
block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160
block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808
block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808
block4_pool (MaxPooling2D) (None, 14, 14, 512) 0
block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808
block5_pool (MaxPooling2D) (None, 7, 7, 512) 0
flatten (Flatten) (None, 25088) 0
fc1 (Dense) (None, 4096) 102764544
fc2 (Dense) (None, 4096) 16781312
predictions (Dense) (None, 12) 49164
=================================================================
Total params: 134,309,708
Trainable params: 134,309,708
Non-trainable params: 0
_________________________________________________________________
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 3600 validated image filenames belonging to 12 classes.
225/225 [==============================] - 6s 26ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.0000 0.0000 0.0000 280
anarrhichomenum 0.0000 0.0000 0.0000 319
brevantherum 0.0000 0.0000 0.0000 267
dulcamara 0.0000 0.0000 0.0000 315
herposolanum 0.0000 0.0000 0.0000 300
holophylla 0.0000 0.0000 0.0000 335
lasiocarpa 0.0858 1.0000 0.1581 309
melongena 0.0000 0.0000 0.0000 285
micracantha 0.0000 0.0000 0.0000 294
petota 0.0000 0.0000 0.0000 301
solanum 0.0000 0.0000 0.0000 299
torva 0.0000 0.0000 0.0000 296
accuracy 0.0858 3600
macro avg 0.0072 0.0833 0.0132 3600
weighted avg 0.0074 0.0858 0.0136 3600
********************
* Confusion Matrix *
********************
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
  _warn_prf(average, modifier, msg_start, len(result))
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
  _warn_prf(average, modifier, msg_start, len(result))
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
  _warn_prf(average, modifier, msg_start, len(result))
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
  _warn_prf(average, modifier, msg_start, len(result))
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
  _warn_prf(average, modifier, msg_start, len(result))
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
  _warn_prf(average, modifier, msg_start, len(result))
**************************************
* Train/Val Accuracy and Loss graphs *
**************************************
****************************
* Started at 4561.78125... *
****************************
Found 14400 validated image filenames belonging to 12 classes.
Found 3600 validated image filenames belonging to 12 classes.
Epoch 1/105
128/128 [==============================] - ETA: 0s - loss: 201.7437 - accuracy: 0.0762
Epoch 1: val_accuracy improved from -inf to 0.06875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_20230216171100_4.h5
128/128 [==============================] - 31s 236ms/step - loss: 201.7437 - accuracy: 0.0762 - val_loss: 2.4879 - val_accuracy: 0.0688
Epoch 2/105
128/128 [==============================] - ETA: 0s - loss: 2.4928 - accuracy: 0.0889
Epoch 2: val_accuracy did not improve from 0.06875
128/128 [==============================] - 13s 101ms/step - loss: 2.4928 - accuracy: 0.0889 - val_loss: 2.4808 - val_accuracy: 0.0688
Epoch 3/105
128/128 [==============================] - ETA: 0s - loss: 2.4869 - accuracy: 0.0840
Epoch 3: val_accuracy improved from 0.06875 to 0.09375, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_20230216171100_4.h5
128/128 [==============================] - 36s 285ms/step - loss: 2.4869 - accuracy: 0.0840 - val_loss: 2.4837 - val_accuracy: 0.0938
Epoch 4/105
128/128 [==============================] - ETA: 0s - loss: 2.4858 - accuracy: 0.0820
Epoch 4: val_accuracy did not improve from 0.09375
128/128 [==============================] - 13s 102ms/step - loss: 2.4858 - accuracy: 0.0820 - val_loss: 2.4830 - val_accuracy: 0.0938
Epoch 5/105
128/128 [==============================] - ETA: 0s - loss: 2.4848 - accuracy: 0.0845
Epoch 5: val_accuracy did not improve from 0.09375
128/128 [==============================] - 13s 101ms/step - loss: 2.4848 - accuracy: 0.0845 - val_loss: 2.4827 - val_accuracy: 0.0938
Epoch 6/105
128/128 [==============================] - ETA: 0s - loss: 2.4857 - accuracy: 0.0796
Epoch 6: val_accuracy improved from 0.09375 to 0.10000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_20230216171100_4.h5
128/128 [==============================] - 31s 240ms/step - loss: 2.4857 - accuracy: 0.0796 - val_loss: 2.4842 - val_accuracy: 0.1000
Epoch 7/105
128/128 [==============================] - ETA: 0s - loss: 2.4855 - accuracy: 0.0801
Epoch 7: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 101ms/step - loss: 2.4855 - accuracy: 0.0801 - val_loss: 2.4877 - val_accuracy: 0.0625
Epoch 8/105
128/128 [==============================] - ETA: 0s - loss: 2.4860 - accuracy: 0.0928
Epoch 8: val_accuracy did not improve from 0.10000
128/128 [==============================] - 13s 101ms/step - loss: 2.4860 - accuracy: 0.0928 - val_loss: 2.4853 - val_accuracy: 0.0625
Epoch 9/105
128/128 [==============================] - ETA: 0s - loss: 2.4855 - accuracy: 0.0806
Epoch 9: val_accuracy improved from 0.10000 to 0.10625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_20230216171100_4.h5
128/128 [==============================] - 35s 276ms/step - loss: 2.4855 - accuracy: 0.0806 - val_loss: 2.4850 - val_accuracy: 0.1063
Epoch 10/105
128/128 [==============================] - ETA: 0s - loss: 2.4859 - accuracy: 0.0806
Epoch 10: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4859 - accuracy: 0.0806 - val_loss: 2.4849 - val_accuracy: 0.1063
Epoch 11/105
128/128 [==============================] - ETA: 0s - loss: 2.4846 - accuracy: 0.0859
Epoch 11: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4846 - accuracy: 0.0859 - val_loss: 2.4857 - val_accuracy: 0.1063
Epoch 12/105
128/128 [==============================] - ETA: 0s - loss: 2.4855 - accuracy: 0.0898
Epoch 12: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4855 - accuracy: 0.0898 - val_loss: 2.4863 - val_accuracy: 0.1063
Epoch 13/105
128/128 [==============================] - ETA: 0s - loss: 2.4847 - accuracy: 0.0820
Epoch 13: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4847 - accuracy: 0.0820 - val_loss: 2.4853 - val_accuracy: 0.0625
Epoch 14/105
128/128 [==============================] - ETA: 0s - loss: 2.4856 - accuracy: 0.0850
Epoch 14: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4856 - accuracy: 0.0850 - val_loss: 2.4858 - val_accuracy: 0.1000
Epoch 15/105
128/128 [==============================] - ETA: 0s - loss: 2.4855 - accuracy: 0.0825
Epoch 15: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4855 - accuracy: 0.0825 - val_loss: 2.4897 - val_accuracy: 0.0750
Epoch 16/105
128/128 [==============================] - ETA: 0s - loss: 2.4852 - accuracy: 0.0796
Epoch 16: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4852 - accuracy: 0.0796 - val_loss: 2.4846 - val_accuracy: 0.1000
Epoch 17/105
128/128 [==============================] - ETA: 0s - loss: 2.4856 - accuracy: 0.0854
Epoch 17: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4856 - accuracy: 0.0854 - val_loss: 2.4862 - val_accuracy: 0.0750
Epoch 18/105
128/128 [==============================] - ETA: 0s - loss: 2.4854 - accuracy: 0.0825
Epoch 18: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4854 - accuracy: 0.0825 - val_loss: 2.4899 - val_accuracy: 0.0562
Epoch 19/105
128/128 [==============================] - ETA: 0s - loss: 2.4859 - accuracy: 0.0815
Epoch 19: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4859 - accuracy: 0.0815 - val_loss: 2.4903 - val_accuracy: 0.0562
Epoch 20/105
128/128 [==============================] - ETA: 0s - loss: 2.4857 - accuracy: 0.0771
Epoch 20: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4857 - accuracy: 0.0771 - val_loss: 2.4899 - val_accuracy: 0.0688
Epoch 21/105
128/128 [==============================] - ETA: 0s - loss: 2.4855 - accuracy: 0.0854
Epoch 21: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4855 - accuracy: 0.0854 - val_loss: 2.4891 - val_accuracy: 0.0562
Epoch 22/105
128/128 [==============================] - ETA: 0s - loss: 2.4864 - accuracy: 0.0854
Epoch 22: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4864 - accuracy: 0.0854 - val_loss: 2.4873 - val_accuracy: 0.0562
Epoch 23/105
128/128 [==============================] - ETA: 0s - loss: 2.4858 - accuracy: 0.0801
Epoch 23: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4858 - accuracy: 0.0801 - val_loss: 2.4868 - val_accuracy: 0.1000
Epoch 24/105
128/128 [==============================] - ETA: 0s - loss: 2.4844 - accuracy: 0.0830
Epoch 24: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 104ms/step - loss: 2.4844 - accuracy: 0.0830 - val_loss: 2.4882 - val_accuracy: 0.1000
Epoch 25/105
128/128 [==============================] - ETA: 0s - loss: 2.4863 - accuracy: 0.0859
Epoch 25: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4863 - accuracy: 0.0859 - val_loss: 2.4888 - val_accuracy: 0.0562
Epoch 26/105
128/128 [==============================] - ETA: 0s - loss: 2.4858 - accuracy: 0.0874
Epoch 26: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4858 - accuracy: 0.0874 - val_loss: 2.4874 - val_accuracy: 0.0562
Epoch 27/105
128/128 [==============================] - ETA: 0s - loss: 2.4845 - accuracy: 0.0864
Epoch 27: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4845 - accuracy: 0.0864 - val_loss: 2.4880 - val_accuracy: 0.1063
Epoch 28/105
128/128 [==============================] - ETA: 0s - loss: 2.4864 - accuracy: 0.0815
Epoch 28: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4864 - accuracy: 0.0815 - val_loss: 2.4851 - val_accuracy: 0.1063
Epoch 29/105
128/128 [==============================] - ETA: 0s - loss: 2.4859 - accuracy: 0.0820
Epoch 29: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4859 - accuracy: 0.0820 - val_loss: 2.4854 - val_accuracy: 0.1000
Epoch 30/105
128/128 [==============================] - ETA: 0s - loss: 2.4851 - accuracy: 0.0845
Epoch 30: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4851 - accuracy: 0.0845 - val_loss: 2.4855 - val_accuracy: 0.0625
Epoch 31/105
128/128 [==============================] - ETA: 0s - loss: 2.4844 - accuracy: 0.0835
Epoch 31: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4844 - accuracy: 0.0835 - val_loss: 2.4891 - val_accuracy: 0.0562
Epoch 32/105
128/128 [==============================] - ETA: 0s - loss: 2.4835 - accuracy: 0.0903
Epoch 32: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4835 - accuracy: 0.0903 - val_loss: 2.4885 - val_accuracy: 0.1063
Epoch 33/105
128/128 [==============================] - ETA: 0s - loss: 2.4873 - accuracy: 0.0815
Epoch 33: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4873 - accuracy: 0.0815 - val_loss: 2.4854 - val_accuracy: 0.1063
Epoch 34/105
128/128 [==============================] - ETA: 0s - loss: 2.4854 - accuracy: 0.0728
Epoch 34: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4854 - accuracy: 0.0728 - val_loss: 2.4841 - val_accuracy: 0.0562
Epoch 35/105
128/128 [==============================] - ETA: 0s - loss: 2.4871 - accuracy: 0.0767
Epoch 35: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4871 - accuracy: 0.0767 - val_loss: 2.4861 - val_accuracy: 0.0562
Epoch 36/105
128/128 [==============================] - ETA: 0s - loss: 2.4856 - accuracy: 0.0879
Epoch 36: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4856 - accuracy: 0.0879 - val_loss: 2.4851 - val_accuracy: 0.0562
Epoch 37/105
128/128 [==============================] - ETA: 0s - loss: 2.4856 - accuracy: 0.0845
Epoch 37: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4856 - accuracy: 0.0845 - val_loss: 2.4841 - val_accuracy: 0.1000
Epoch 38/105
128/128 [==============================] - ETA: 0s - loss: 2.4846 - accuracy: 0.0928
Epoch 38: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4846 - accuracy: 0.0928 - val_loss: 2.4837 - val_accuracy: 0.1000
Epoch 39/105
128/128 [==============================] - ETA: 0s - loss: 2.4848 - accuracy: 0.0898
Epoch 39: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4848 - accuracy: 0.0898 - val_loss: 2.4833 - val_accuracy: 0.1000
Epoch 40/105
128/128 [==============================] - ETA: 0s - loss: 2.4844 - accuracy: 0.0845
Epoch 40: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4844 - accuracy: 0.0845 - val_loss: 2.4857 - val_accuracy: 0.1063
Epoch 41/105
128/128 [==============================] - ETA: 0s - loss: 2.4862 - accuracy: 0.0864
Epoch 41: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4862 - accuracy: 0.0864 - val_loss: 2.4858 - val_accuracy: 0.1000
Epoch 42/105
128/128 [==============================] - ETA: 0s - loss: 2.4857 - accuracy: 0.0767
Epoch 42: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4857 - accuracy: 0.0767 - val_loss: 2.4876 - val_accuracy: 0.1063
Epoch 43/105
128/128 [==============================] - ETA: 0s - loss: 2.4861 - accuracy: 0.0767
Epoch 43: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4861 - accuracy: 0.0767 - val_loss: 2.4874 - val_accuracy: 0.0562
Epoch 44/105
128/128 [==============================] - ETA: 0s - loss: 2.4851 - accuracy: 0.0840
Epoch 44: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4851 - accuracy: 0.0840 - val_loss: 2.4874 - val_accuracy: 0.1063
Epoch 45/105
128/128 [==============================] - ETA: 0s - loss: 2.4851 - accuracy: 0.0811
Epoch 45: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4851 - accuracy: 0.0811 - val_loss: 2.4908 - val_accuracy: 0.0625
Epoch 46/105
128/128 [==============================] - ETA: 0s - loss: 2.4852 - accuracy: 0.0918
Epoch 46: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4852 - accuracy: 0.0918 - val_loss: 2.4897 - val_accuracy: 0.0625
Epoch 47/105
128/128 [==============================] - ETA: 0s - loss: 2.4854 - accuracy: 0.0806
Epoch 47: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4854 - accuracy: 0.0806 - val_loss: 2.4902 - val_accuracy: 0.0688
Epoch 48/105
128/128 [==============================] - ETA: 0s - loss: 2.4863 - accuracy: 0.0840
Epoch 48: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 103ms/step - loss: 2.4863 - accuracy: 0.0840 - val_loss: 2.4889 - val_accuracy: 0.0562
Epoch 49/105
128/128 [==============================] - ETA: 0s - loss: 2.4858 - accuracy: 0.0884
Epoch 49: val_accuracy did not improve from 0.10625
128/128 [==============================] - 13s 102ms/step - loss: 2.4858 - accuracy: 0.0884 - val_loss: 2.4853 - val_accuracy: 0.0562
Restoring model weights from the end of the best epoch: 9.
Epoch 49: early stopping
********* Training time: 1398.515625 s.
*****************
* Model Summary *
*****************
Model: "vgg16"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_4 (InputLayer) [(None, 224, 224, 3)] 0
block1_conv1 (Conv2D) (None, 224, 224, 64) 1792
block1_conv2 (Conv2D) (None, 224, 224, 64) 36928
block1_pool (MaxPooling2D) (None, 112, 112, 64) 0
block2_conv1 (Conv2D) (None, 112, 112, 128) 73856
block2_conv2 (Conv2D) (None, 112, 112, 128) 147584
block2_pool (MaxPooling2D) (None, 56, 56, 128) 0
block3_conv1 (Conv2D) (None, 56, 56, 256) 295168
block3_conv2 (Conv2D) (None, 56, 56, 256) 590080
block3_conv3 (Conv2D) (None, 56, 56, 256) 590080
block3_pool (MaxPooling2D) (None, 28, 28, 256) 0
block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160
block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808
block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808
block4_pool (MaxPooling2D) (None, 14, 14, 512) 0
block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808
block5_pool (MaxPooling2D) (None, 7, 7, 512) 0
flatten (Flatten) (None, 25088) 0
fc1 (Dense) (None, 4096) 102764544
fc2 (Dense) (None, 4096) 16781312
predictions (Dense) (None, 12) 49164
=================================================================
Total params: 134,309,708
Trainable params: 134,309,708
Non-trainable params: 0
_________________________________________________________________
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 3600 validated image filenames belonging to 12 classes.
225/225 [==============================] - 6s 26ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.0000 0.0000 0.0000 331
anarrhichomenum 0.0000 0.0000 0.0000 319
brevantherum 0.0000 0.0000 0.0000 312
dulcamara 0.0000 0.0000 0.0000 259
herposolanum 0.0000 0.0000 0.0000 273
holophylla 0.0000 0.0000 0.0000 316
lasiocarpa 0.0000 0.0000 0.0000 314
melongena 0.0000 0.0000 0.0000 302
micracantha 0.0875 1.0000 0.1609 315
petota 0.0000 0.0000 0.0000 289
solanum 0.0000 0.0000 0.0000 271
torva 0.0000 0.0000 0.0000 299
accuracy 0.0875 3600
macro avg 0.0073 0.0833 0.0134 3600
weighted avg 0.0077 0.0875 0.0141 3600
********************
* Confusion Matrix *
********************
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))
**************************************
* Train/Val Accuracy and Loss graphs *
**************************************
*******************************
* Mean metrics across 4 folds *
*******************************
| 0 | |
|---|---|
| accuracy | 0.085694 |
| acanthophora.precision | 0.000000 |
| acanthophora.recall | 0.000000 |
| acanthophora.f1-score | 0.000000 |
| acanthophora.support | 298.250000 |
| anarrhichomenum.precision | 0.000000 |
| anarrhichomenum.recall | 0.000000 |
| anarrhichomenum.f1-score | 0.000000 |
| anarrhichomenum.support | 318.750000 |
| brevantherum.precision | 0.000000 |
| brevantherum.recall | 0.000000 |
| brevantherum.f1-score | 0.000000 |
| brevantherum.support | 288.000000 |
| dulcamara.precision | 0.020625 |
| dulcamara.recall | 0.250000 |
| dulcamara.f1-score | 0.038106 |
| dulcamara.support | 295.500000 |
| herposolanum.precision | 0.000000 |
| herposolanum.recall | 0.000000 |
| herposolanum.f1-score | 0.000000 |
| herposolanum.support | 285.000000 |
| holophylla.precision | 0.000000 |
| holophylla.recall | 0.000000 |
| holophylla.f1-score | 0.000000 |
| holophylla.support | 304.750000 |
| lasiocarpa.precision | 0.043194 |
| lasiocarpa.recall | 0.500000 |
| lasiocarpa.f1-score | 0.079519 |
| lasiocarpa.support | 312.500000 |
| melongena.precision | 0.000000 |
| melongena.recall | 0.000000 |
| melongena.f1-score | 0.000000 |
| melongena.support | 298.000000 |
| micracantha.precision | 0.021875 |
| micracantha.recall | 0.250000 |
| micracantha.f1-score | 0.040230 |
| micracantha.support | 300.000000 |
| petota.precision | 0.000000 |
| petota.recall | 0.000000 |
| petota.f1-score | 0.000000 |
| petota.support | 304.500000 |
| solanum.precision | 0.000000 |
| solanum.recall | 0.000000 |
| solanum.f1-score | 0.000000 |
| solanum.support | 294.500000 |
| torva.precision | 0.000000 |
| torva.recall | 0.000000 |
| torva.f1-score | 0.000000 |
| torva.support | 300.250000 |
| macro avg.precision | 0.007141 |
| macro avg.recall | 0.083333 |
| macro avg.f1-score | 0.013155 |
| macro avg.support | 3600.000000 |
| weighted avg.precision | 0.007347 |
| weighted avg.recall | 0.085694 |
| weighted avg.f1-score | 0.013534 |
| weighted avg.support | 3600.000000 |
CPU times: total: 1h 39min 21s
Wall time: 50min 45s
%%time
kf = ShuffleSplit(n_splits=CV_FOLDS,
test_size=VAL_SIZE,
random_state=RANDOM_SEED)
split = 1
vgg16_tl_cv_val_pred = []
for train_index, val_index in kf.split(balanced_training_data):
vgg16_tl_model = tf.keras.applications.vgg16.VGG16(
include_top=True,
weights='imagenet',
pooling="avg",
# classes=12,
classifier_activation='softmax',
)
# Change the output prediction layer to support the 12 classes instead of the 1000 classes in ImageNet
output = vgg16_tl_model.layers[-2].output
predictions = tf.keras.layers.Dense(NUM_CLASSES,
activation="softmax")(output)
vgg16_tl_model = tf.keras.Model(inputs = vgg16_tl_model.input,
outputs = predictions)
# Freeze all layers but the FC and output
for layer in (vgg16_tl_model.layers)[:19]:
layer.trainable = False
# Add an optimizer
vgg16_tl_model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.003),
loss='categorical_crossentropy',
metrics=['accuracy'])
#vgg16_tl_model.compile(optimizer=tf.optimizers.SGD(learning_rate=0.0001, momentum=0.9),
# loss='categorical_crossentropy',
# metrics=['accuracy'])
training_split_data = balanced_training_data.iloc[train_index]
val_split_data = balanced_training_data.iloc[val_index]
# Add a progress bar and save checkpoints
vgg16_tl_callbacks = [
create_model_checkpoint(os.path.join(DATA_ROOT_LOCATION, f"vgg16_tl_{TRAINING_RUN_ID}_{split}.h5")),
tf.keras.callbacks.ProgbarLogger(
count_mode = 'steps',
stateful_metrics = None
),
tf.keras.callbacks.EarlyStopping(
monitor='val_accuracy',
min_delta=0,
patience=40,
verbose=1,
mode='auto',
restore_best_weights=True
)
]
fit_params = {
"x": training_split_data,
"epochs": 105,
"callbacks": vgg16_tl_callbacks,
"validation_data": val_split_data,
"steps_per_epoch": 64,
"validation_steps": 10,
}
preproc_func = tf.keras.applications.vgg16.preprocess_input
with tf.device(TRAINING_DEVICE_NAME):
vgg16_tl_training_history = train_model(vgg16_tl_model,
fit_params=fit_params,
preproc_func=preproc_func)
_, pred_report = evaluate_model(vgg16_tl_model,
vgg16_tl_training_history,
fit_params=fit_params,
preproc_func=preproc_func)
vgg16_tl_cv_val_pred.append(pred_report)
split += 1
calculate_cv_mean_metrics(vgg16_tl_cv_val_pred)
***************************
* Started at 12.171875... *
***************************
Found 14400 validated image filenames belonging to 12 classes.
Found 3600 validated image filenames belonging to 12 classes.
Epoch 1/105
64/64 [==============================] - ETA: 0s - loss: 17.7204 - accuracy: 0.1318
Epoch 1: val_accuracy improved from -inf to 0.14375, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_1.h5
64/64 [==============================] - 47s 664ms/step - loss: 17.7204 - accuracy: 0.1318 - val_loss: 2.7921 - val_accuracy: 0.1437
Epoch 2/105
64/64 [==============================] - ETA: 0s - loss: 2.1072 - accuracy: 0.2393
Epoch 2: val_accuracy improved from 0.14375 to 0.29375, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_1.h5
64/64 [==============================] - 42s 653ms/step - loss: 2.1072 - accuracy: 0.2393 - val_loss: 1.9249 - val_accuracy: 0.2937
Epoch 3/105
64/64 [==============================] - ETA: 0s - loss: 2.0038 - accuracy: 0.2754
Epoch 3: val_accuracy did not improve from 0.29375
64/64 [==============================] - 19s 290ms/step - loss: 2.0038 - accuracy: 0.2754 - val_loss: 2.0032 - val_accuracy: 0.2500
Epoch 4/105
64/64 [==============================] - ETA: 0s - loss: 1.9135 - accuracy: 0.2939
Epoch 4: val_accuracy did not improve from 0.29375
64/64 [==============================] - 13s 199ms/step - loss: 1.9135 - accuracy: 0.2939 - val_loss: 2.0673 - val_accuracy: 0.2250
Epoch 5/105
64/64 [==============================] - ETA: 0s - loss: 1.9007 - accuracy: 0.2832
Epoch 5: val_accuracy improved from 0.29375 to 0.33750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_1.h5
64/64 [==============================] - 37s 569ms/step - loss: 1.9007 - accuracy: 0.2832 - val_loss: 1.7949 - val_accuracy: 0.3375
Epoch 6/105
64/64 [==============================] - ETA: 0s - loss: 1.8988 - accuracy: 0.2920
Epoch 6: val_accuracy improved from 0.33750 to 0.38750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_1.h5
64/64 [==============================] - 39s 604ms/step - loss: 1.8988 - accuracy: 0.2920 - val_loss: 1.6587 - val_accuracy: 0.3875
Epoch 7/105
64/64 [==============================] - ETA: 0s - loss: 1.8206 - accuracy: 0.3193
Epoch 7: val_accuracy did not improve from 0.38750
64/64 [==============================] - 13s 199ms/step - loss: 1.8206 - accuracy: 0.3193 - val_loss: 1.8346 - val_accuracy: 0.3313
Epoch 8/105
64/64 [==============================] - ETA: 0s - loss: 1.8560 - accuracy: 0.3145
Epoch 8: val_accuracy did not improve from 0.38750
64/64 [==============================] - 13s 197ms/step - loss: 1.8560 - accuracy: 0.3145 - val_loss: 1.7644 - val_accuracy: 0.3063
Epoch 9/105
64/64 [==============================] - ETA: 0s - loss: 1.8007 - accuracy: 0.3359
Epoch 9: val_accuracy did not improve from 0.38750
64/64 [==============================] - 12s 180ms/step - loss: 1.8007 - accuracy: 0.3359 - val_loss: 1.6989 - val_accuracy: 0.3438
Epoch 10/105
64/64 [==============================] - ETA: 0s - loss: 1.7822 - accuracy: 0.3359
Epoch 10: val_accuracy did not improve from 0.38750
64/64 [==============================] - 12s 178ms/step - loss: 1.7822 - accuracy: 0.3359 - val_loss: 2.1629 - val_accuracy: 0.2500
Epoch 11/105
64/64 [==============================] - ETA: 0s - loss: 1.8573 - accuracy: 0.3271
Epoch 11: val_accuracy did not improve from 0.38750
64/64 [==============================] - 11s 172ms/step - loss: 1.8573 - accuracy: 0.3271 - val_loss: 1.8467 - val_accuracy: 0.3438
Epoch 12/105
64/64 [==============================] - ETA: 0s - loss: 1.7827 - accuracy: 0.3506
Epoch 12: val_accuracy did not improve from 0.38750
64/64 [==============================] - 10s 149ms/step - loss: 1.7827 - accuracy: 0.3506 - val_loss: 2.0629 - val_accuracy: 0.3063
Epoch 13/105
64/64 [==============================] - ETA: 0s - loss: 1.8045 - accuracy: 0.3457
Epoch 13: val_accuracy improved from 0.38750 to 0.40625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_1.h5
64/64 [==============================] - 33s 518ms/step - loss: 1.8045 - accuracy: 0.3457 - val_loss: 1.5942 - val_accuracy: 0.4062
Epoch 14/105
64/64 [==============================] - ETA: 0s - loss: 1.8070 - accuracy: 0.3525
Epoch 14: val_accuracy did not improve from 0.40625
64/64 [==============================] - 11s 170ms/step - loss: 1.8070 - accuracy: 0.3525 - val_loss: 1.6353 - val_accuracy: 0.3688
Epoch 15/105
64/64 [==============================] - ETA: 0s - loss: 1.7093 - accuracy: 0.3467
Epoch 15: val_accuracy did not improve from 0.40625
64/64 [==============================] - 9s 141ms/step - loss: 1.7093 - accuracy: 0.3467 - val_loss: 1.7120 - val_accuracy: 0.3688
Epoch 16/105
64/64 [==============================] - ETA: 0s - loss: 1.7342 - accuracy: 0.3555
Epoch 16: val_accuracy improved from 0.40625 to 0.41875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_1.h5
64/64 [==============================] - 32s 502ms/step - loss: 1.7342 - accuracy: 0.3555 - val_loss: 1.5410 - val_accuracy: 0.4187
Epoch 17/105
64/64 [==============================] - ETA: 0s - loss: 1.6355 - accuracy: 0.3730
Epoch 17: val_accuracy did not improve from 0.41875
64/64 [==============================] - 11s 161ms/step - loss: 1.6355 - accuracy: 0.3730 - val_loss: 1.7243 - val_accuracy: 0.3438
Epoch 18/105
64/64 [==============================] - ETA: 0s - loss: 1.7663 - accuracy: 0.3213
Epoch 18: val_accuracy did not improve from 0.41875
64/64 [==============================] - 9s 146ms/step - loss: 1.7663 - accuracy: 0.3213 - val_loss: 1.7547 - val_accuracy: 0.3500
Epoch 19/105
64/64 [==============================] - ETA: 0s - loss: 1.7226 - accuracy: 0.3604
Epoch 19: val_accuracy did not improve from 0.41875
64/64 [==============================] - 9s 137ms/step - loss: 1.7226 - accuracy: 0.3604 - val_loss: 1.7196 - val_accuracy: 0.3750
Epoch 20/105
64/64 [==============================] - ETA: 0s - loss: 1.6731 - accuracy: 0.3867
Epoch 20: val_accuracy did not improve from 0.41875
64/64 [==============================] - 8s 128ms/step - loss: 1.6731 - accuracy: 0.3867 - val_loss: 1.6049 - val_accuracy: 0.4125
Epoch 21/105
64/64 [==============================] - ETA: 0s - loss: 1.6336 - accuracy: 0.4150
Epoch 21: val_accuracy did not improve from 0.41875
64/64 [==============================] - 8s 125ms/step - loss: 1.6336 - accuracy: 0.4150 - val_loss: 1.6809 - val_accuracy: 0.3500
Epoch 22/105
64/64 [==============================] - ETA: 0s - loss: 1.6673 - accuracy: 0.3594
Epoch 22: val_accuracy did not improve from 0.41875
64/64 [==============================] - 8s 126ms/step - loss: 1.6673 - accuracy: 0.3594 - val_loss: 1.9297 - val_accuracy: 0.3562
Epoch 23/105
64/64 [==============================] - ETA: 0s - loss: 1.6387 - accuracy: 0.3936
Epoch 23: val_accuracy did not improve from 0.41875
64/64 [==============================] - 8s 118ms/step - loss: 1.6387 - accuracy: 0.3936 - val_loss: 1.7772 - val_accuracy: 0.3812
Epoch 24/105
64/64 [==============================] - ETA: 0s - loss: 1.6049 - accuracy: 0.4014
Epoch 24: val_accuracy did not improve from 0.41875
64/64 [==============================] - 8s 118ms/step - loss: 1.6049 - accuracy: 0.4014 - val_loss: 1.6113 - val_accuracy: 0.3875
Epoch 25/105
64/64 [==============================] - ETA: 0s - loss: 1.6542 - accuracy: 0.3633
Epoch 25: val_accuracy improved from 0.41875 to 0.43750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_1.h5
64/64 [==============================] - 30s 480ms/step - loss: 1.6542 - accuracy: 0.3633 - val_loss: 1.5523 - val_accuracy: 0.4375
Epoch 26/105
64/64 [==============================] - ETA: 0s - loss: 1.6244 - accuracy: 0.3994
Epoch 26: val_accuracy did not improve from 0.43750
64/64 [==============================] - 8s 128ms/step - loss: 1.6244 - accuracy: 0.3994 - val_loss: 1.9127 - val_accuracy: 0.3562
Epoch 27/105
64/64 [==============================] - ETA: 0s - loss: 1.6794 - accuracy: 0.4072
Epoch 27: val_accuracy improved from 0.43750 to 0.47500, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_1.h5
64/64 [==============================] - 30s 475ms/step - loss: 1.6794 - accuracy: 0.4072 - val_loss: 1.5136 - val_accuracy: 0.4750
Epoch 28/105
64/64 [==============================] - ETA: 0s - loss: 1.5675 - accuracy: 0.4424
Epoch 28: val_accuracy did not improve from 0.47500
64/64 [==============================] - 9s 129ms/step - loss: 1.5675 - accuracy: 0.4424 - val_loss: 1.5238 - val_accuracy: 0.4437
Epoch 29/105
64/64 [==============================] - ETA: 0s - loss: 1.6885 - accuracy: 0.3779
Epoch 29: val_accuracy did not improve from 0.47500
64/64 [==============================] - 8s 120ms/step - loss: 1.6885 - accuracy: 0.3779 - val_loss: 1.5482 - val_accuracy: 0.3938
Epoch 30/105
64/64 [==============================] - ETA: 0s - loss: 1.5719 - accuracy: 0.4287
Epoch 30: val_accuracy did not improve from 0.47500
64/64 [==============================] - 7s 111ms/step - loss: 1.5719 - accuracy: 0.4287 - val_loss: 1.5013 - val_accuracy: 0.4500
Epoch 31/105
64/64 [==============================] - ETA: 0s - loss: 1.5706 - accuracy: 0.4258
Epoch 31: val_accuracy did not improve from 0.47500
64/64 [==============================] - 7s 112ms/step - loss: 1.5706 - accuracy: 0.4258 - val_loss: 1.6631 - val_accuracy: 0.4250
Epoch 32/105
64/64 [==============================] - ETA: 0s - loss: 1.5842 - accuracy: 0.4395
Epoch 32: val_accuracy did not improve from 0.47500
64/64 [==============================] - 7s 110ms/step - loss: 1.5842 - accuracy: 0.4395 - val_loss: 1.7439 - val_accuracy: 0.4250
Epoch 33/105
64/64 [==============================] - ETA: 0s - loss: 1.5988 - accuracy: 0.4209
Epoch 33: val_accuracy did not improve from 0.47500
64/64 [==============================] - 7s 109ms/step - loss: 1.5988 - accuracy: 0.4209 - val_loss: 1.7139 - val_accuracy: 0.4062
Epoch 34/105
64/64 [==============================] - ETA: 0s - loss: 1.5942 - accuracy: 0.4385
Epoch 34: val_accuracy did not improve from 0.47500
64/64 [==============================] - 7s 110ms/step - loss: 1.5942 - accuracy: 0.4385 - val_loss: 1.7520 - val_accuracy: 0.4062
Epoch 35/105
64/64 [==============================] - ETA: 0s - loss: 1.6103 - accuracy: 0.4160
Epoch 35: val_accuracy improved from 0.47500 to 0.50625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_1.h5
64/64 [==============================] - 31s 490ms/step - loss: 1.6103 - accuracy: 0.4160 - val_loss: 1.4144 - val_accuracy: 0.5063
Epoch 36/105
64/64 [==============================] - ETA: 0s - loss: 1.5277 - accuracy: 0.4600
Epoch 36: val_accuracy did not improve from 0.50625
64/64 [==============================] - 8s 112ms/step - loss: 1.5277 - accuracy: 0.4600 - val_loss: 1.7619 - val_accuracy: 0.4187
Epoch 37/105
64/64 [==============================] - ETA: 0s - loss: 1.5708 - accuracy: 0.4316
Epoch 37: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 112ms/step - loss: 1.5708 - accuracy: 0.4316 - val_loss: 1.5884 - val_accuracy: 0.4437
Epoch 38/105
64/64 [==============================] - ETA: 0s - loss: 1.6199 - accuracy: 0.4248
Epoch 38: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 107ms/step - loss: 1.6199 - accuracy: 0.4248 - val_loss: 1.6657 - val_accuracy: 0.4062
Epoch 39/105
64/64 [==============================] - ETA: 0s - loss: 1.5616 - accuracy: 0.4404
Epoch 39: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 107ms/step - loss: 1.5616 - accuracy: 0.4404 - val_loss: 1.9636 - val_accuracy: 0.3250
Epoch 40/105
64/64 [==============================] - ETA: 0s - loss: 1.5434 - accuracy: 0.4336
Epoch 40: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 108ms/step - loss: 1.5434 - accuracy: 0.4336 - val_loss: 1.6731 - val_accuracy: 0.4187
Epoch 41/105
64/64 [==============================] - ETA: 0s - loss: 1.5703 - accuracy: 0.4326
Epoch 41: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 110ms/step - loss: 1.5703 - accuracy: 0.4326 - val_loss: 1.6319 - val_accuracy: 0.3938
Epoch 42/105
64/64 [==============================] - ETA: 0s - loss: 1.5465 - accuracy: 0.4473
Epoch 42: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 108ms/step - loss: 1.5465 - accuracy: 0.4473 - val_loss: 1.5500 - val_accuracy: 0.4563
Epoch 43/105
64/64 [==============================] - ETA: 0s - loss: 1.5903 - accuracy: 0.4404
Epoch 43: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 107ms/step - loss: 1.5903 - accuracy: 0.4404 - val_loss: 1.7327 - val_accuracy: 0.4062
Epoch 44/105
64/64 [==============================] - ETA: 0s - loss: 1.5304 - accuracy: 0.4424
Epoch 44: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 105ms/step - loss: 1.5304 - accuracy: 0.4424 - val_loss: 1.5608 - val_accuracy: 0.4250
Epoch 45/105
64/64 [==============================] - ETA: 0s - loss: 1.5687 - accuracy: 0.4404
Epoch 45: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 106ms/step - loss: 1.5687 - accuracy: 0.4404 - val_loss: 1.7591 - val_accuracy: 0.4000
Epoch 46/105
64/64 [==============================] - ETA: 0s - loss: 1.5225 - accuracy: 0.4365
Epoch 46: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 106ms/step - loss: 1.5225 - accuracy: 0.4365 - val_loss: 1.6563 - val_accuracy: 0.4375
Epoch 47/105
64/64 [==============================] - ETA: 0s - loss: 1.5803 - accuracy: 0.4248
Epoch 47: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 105ms/step - loss: 1.5803 - accuracy: 0.4248 - val_loss: 2.0241 - val_accuracy: 0.2937
Epoch 48/105
64/64 [==============================] - ETA: 0s - loss: 1.5319 - accuracy: 0.4541
Epoch 48: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 104ms/step - loss: 1.5319 - accuracy: 0.4541 - val_loss: 1.6974 - val_accuracy: 0.4062
Epoch 49/105
64/64 [==============================] - ETA: 0s - loss: 1.5196 - accuracy: 0.4512
Epoch 49: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 104ms/step - loss: 1.5196 - accuracy: 0.4512 - val_loss: 2.2677 - val_accuracy: 0.3250
Epoch 50/105
64/64 [==============================] - ETA: 0s - loss: 1.5206 - accuracy: 0.4512
Epoch 50: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 107ms/step - loss: 1.5206 - accuracy: 0.4512 - val_loss: 1.8661 - val_accuracy: 0.4062
Epoch 51/105
64/64 [==============================] - ETA: 0s - loss: 1.5237 - accuracy: 0.4629
Epoch 51: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 112ms/step - loss: 1.5237 - accuracy: 0.4629 - val_loss: 1.9110 - val_accuracy: 0.3250
Epoch 52/105
64/64 [==============================] - ETA: 0s - loss: 1.4838 - accuracy: 0.4727
Epoch 52: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 111ms/step - loss: 1.4838 - accuracy: 0.4727 - val_loss: 1.6284 - val_accuracy: 0.4250
Epoch 53/105
64/64 [==============================] - ETA: 0s - loss: 1.5545 - accuracy: 0.4199
Epoch 53: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 106ms/step - loss: 1.5545 - accuracy: 0.4199 - val_loss: 1.4564 - val_accuracy: 0.4750
Epoch 54/105
64/64 [==============================] - ETA: 0s - loss: 1.4792 - accuracy: 0.4561
Epoch 54: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 108ms/step - loss: 1.4792 - accuracy: 0.4561 - val_loss: 1.5171 - val_accuracy: 0.4812
Epoch 55/105
64/64 [==============================] - ETA: 0s - loss: 1.4632 - accuracy: 0.4512
Epoch 55: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 107ms/step - loss: 1.4632 - accuracy: 0.4512 - val_loss: 1.6384 - val_accuracy: 0.4250
Epoch 56/105
64/64 [==============================] - ETA: 0s - loss: 1.4624 - accuracy: 0.4707
Epoch 56: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 107ms/step - loss: 1.4624 - accuracy: 0.4707 - val_loss: 1.7668 - val_accuracy: 0.4563
Epoch 57/105
64/64 [==============================] - ETA: 0s - loss: 1.4761 - accuracy: 0.4561
Epoch 57: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 107ms/step - loss: 1.4761 - accuracy: 0.4561 - val_loss: 1.7704 - val_accuracy: 0.4187
Epoch 58/105
64/64 [==============================] - ETA: 0s - loss: 1.4924 - accuracy: 0.4658
Epoch 58: val_accuracy improved from 0.50625 to 0.52500, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_1.h5
64/64 [==============================] - 29s 460ms/step - loss: 1.4924 - accuracy: 0.4658 - val_loss: 1.5019 - val_accuracy: 0.5250
Epoch 59/105
64/64 [==============================] - ETA: 0s - loss: 1.5566 - accuracy: 0.4346
Epoch 59: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 104ms/step - loss: 1.5566 - accuracy: 0.4346 - val_loss: 1.5978 - val_accuracy: 0.4625
Epoch 60/105
64/64 [==============================] - ETA: 0s - loss: 1.5502 - accuracy: 0.4297
Epoch 60: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 106ms/step - loss: 1.5502 - accuracy: 0.4297 - val_loss: 2.0731 - val_accuracy: 0.3500
Epoch 61/105
64/64 [==============================] - ETA: 0s - loss: 1.4650 - accuracy: 0.4648
Epoch 61: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 104ms/step - loss: 1.4650 - accuracy: 0.4648 - val_loss: 1.4992 - val_accuracy: 0.5000
Epoch 62/105
64/64 [==============================] - ETA: 0s - loss: 1.4647 - accuracy: 0.5039
Epoch 62: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 108ms/step - loss: 1.4647 - accuracy: 0.5039 - val_loss: 1.7177 - val_accuracy: 0.4500
Epoch 63/105
64/64 [==============================] - ETA: 0s - loss: 1.5212 - accuracy: 0.4453
Epoch 63: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 103ms/step - loss: 1.5212 - accuracy: 0.4453 - val_loss: 1.5695 - val_accuracy: 0.5125
Epoch 64/105
64/64 [==============================] - ETA: 0s - loss: 1.5157 - accuracy: 0.4736
Epoch 64: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 103ms/step - loss: 1.5157 - accuracy: 0.4736 - val_loss: 1.7630 - val_accuracy: 0.3938
Epoch 65/105
64/64 [==============================] - ETA: 0s - loss: 1.5267 - accuracy: 0.4199
Epoch 65: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 106ms/step - loss: 1.5267 - accuracy: 0.4199 - val_loss: 1.5744 - val_accuracy: 0.4375
Epoch 66/105
64/64 [==============================] - ETA: 0s - loss: 1.4379 - accuracy: 0.4883
Epoch 66: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 107ms/step - loss: 1.4379 - accuracy: 0.4883 - val_loss: 1.7750 - val_accuracy: 0.4437
Epoch 67/105
64/64 [==============================] - ETA: 0s - loss: 1.4811 - accuracy: 0.4541
Epoch 67: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 106ms/step - loss: 1.4811 - accuracy: 0.4541 - val_loss: 1.6242 - val_accuracy: 0.4437
Epoch 68/105
64/64 [==============================] - ETA: 0s - loss: 1.4020 - accuracy: 0.4971
Epoch 68: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 102ms/step - loss: 1.4020 - accuracy: 0.4971 - val_loss: 1.7468 - val_accuracy: 0.4125
Epoch 69/105
64/64 [==============================] - ETA: 0s - loss: 1.4997 - accuracy: 0.4697
Epoch 69: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 102ms/step - loss: 1.4997 - accuracy: 0.4697 - val_loss: 1.9306 - val_accuracy: 0.4062
Epoch 70/105
64/64 [==============================] - ETA: 0s - loss: 1.4636 - accuracy: 0.4795
Epoch 70: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 104ms/step - loss: 1.4636 - accuracy: 0.4795 - val_loss: 1.5397 - val_accuracy: 0.4625
Epoch 71/105
64/64 [==============================] - ETA: 0s - loss: 1.4838 - accuracy: 0.4678
Epoch 71: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 104ms/step - loss: 1.4838 - accuracy: 0.4678 - val_loss: 1.4950 - val_accuracy: 0.4938
Epoch 72/105
64/64 [==============================] - ETA: 0s - loss: 1.4245 - accuracy: 0.4893
Epoch 72: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 102ms/step - loss: 1.4245 - accuracy: 0.4893 - val_loss: 1.5151 - val_accuracy: 0.4625
Epoch 73/105
64/64 [==============================] - ETA: 0s - loss: 1.4605 - accuracy: 0.4707
Epoch 73: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 104ms/step - loss: 1.4605 - accuracy: 0.4707 - val_loss: 1.5582 - val_accuracy: 0.4625
Epoch 74/105
64/64 [==============================] - ETA: 0s - loss: 1.4142 - accuracy: 0.4883
Epoch 74: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 102ms/step - loss: 1.4142 - accuracy: 0.4883 - val_loss: 1.7057 - val_accuracy: 0.4437
Epoch 75/105
64/64 [==============================] - ETA: 0s - loss: 1.4426 - accuracy: 0.4717
Epoch 75: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 103ms/step - loss: 1.4426 - accuracy: 0.4717 - val_loss: 1.6756 - val_accuracy: 0.4375
Epoch 76/105
64/64 [==============================] - ETA: 0s - loss: 1.4706 - accuracy: 0.4619
Epoch 76: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 102ms/step - loss: 1.4706 - accuracy: 0.4619 - val_loss: 1.6434 - val_accuracy: 0.4812
Epoch 77/105
64/64 [==============================] - ETA: 0s - loss: 1.4382 - accuracy: 0.4863
Epoch 77: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 105ms/step - loss: 1.4382 - accuracy: 0.4863 - val_loss: 1.6838 - val_accuracy: 0.4563
Epoch 78/105
64/64 [==============================] - ETA: 0s - loss: 1.3947 - accuracy: 0.5059
Epoch 78: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 104ms/step - loss: 1.3947 - accuracy: 0.5059 - val_loss: 1.8720 - val_accuracy: 0.4062
Epoch 79/105
64/64 [==============================] - ETA: 0s - loss: 1.4470 - accuracy: 0.4678
Epoch 79: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 104ms/step - loss: 1.4470 - accuracy: 0.4678 - val_loss: 1.4979 - val_accuracy: 0.5250
Epoch 80/105
64/64 [==============================] - ETA: 0s - loss: 1.3930 - accuracy: 0.5156
Epoch 80: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 105ms/step - loss: 1.3930 - accuracy: 0.5156 - val_loss: 1.8121 - val_accuracy: 0.4125
Epoch 81/105
64/64 [==============================] - ETA: 0s - loss: 1.4679 - accuracy: 0.4824
Epoch 81: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 103ms/step - loss: 1.4679 - accuracy: 0.4824 - val_loss: 1.6343 - val_accuracy: 0.4500
Epoch 82/105
64/64 [==============================] - ETA: 0s - loss: 1.4269 - accuracy: 0.4990
Epoch 82: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 104ms/step - loss: 1.4269 - accuracy: 0.4990 - val_loss: 1.6011 - val_accuracy: 0.4313
Epoch 83/105
64/64 [==============================] - ETA: 0s - loss: 1.4466 - accuracy: 0.5127
Epoch 83: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 103ms/step - loss: 1.4466 - accuracy: 0.5127 - val_loss: 1.7736 - val_accuracy: 0.4563
Epoch 84/105
64/64 [==============================] - ETA: 0s - loss: 1.4224 - accuracy: 0.4932
Epoch 84: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 103ms/step - loss: 1.4224 - accuracy: 0.4932 - val_loss: 1.5448 - val_accuracy: 0.4625
Epoch 85/105
64/64 [==============================] - ETA: 0s - loss: 1.4505 - accuracy: 0.4814
Epoch 85: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 104ms/step - loss: 1.4505 - accuracy: 0.4814 - val_loss: 1.6425 - val_accuracy: 0.4500
Epoch 86/105
64/64 [==============================] - ETA: 0s - loss: 1.4151 - accuracy: 0.4941
Epoch 86: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 102ms/step - loss: 1.4151 - accuracy: 0.4941 - val_loss: 1.3871 - val_accuracy: 0.5125
Epoch 87/105
64/64 [==============================] - ETA: 0s - loss: 1.3500 - accuracy: 0.5020
Epoch 87: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 105ms/step - loss: 1.3500 - accuracy: 0.5020 - val_loss: 1.5208 - val_accuracy: 0.4563
Epoch 88/105
64/64 [==============================] - ETA: 0s - loss: 1.4366 - accuracy: 0.4775
Epoch 88: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 102ms/step - loss: 1.4366 - accuracy: 0.4775 - val_loss: 1.5407 - val_accuracy: 0.4812
Epoch 89/105
64/64 [==============================] - ETA: 0s - loss: 1.4361 - accuracy: 0.4805
Epoch 89: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 103ms/step - loss: 1.4361 - accuracy: 0.4805 - val_loss: 1.8043 - val_accuracy: 0.4062
Epoch 90/105
64/64 [==============================] - ETA: 0s - loss: 1.4979 - accuracy: 0.4805
Epoch 90: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 105ms/step - loss: 1.4979 - accuracy: 0.4805 - val_loss: 1.4669 - val_accuracy: 0.4625
Epoch 91/105
64/64 [==============================] - ETA: 0s - loss: 1.3780 - accuracy: 0.4980
Epoch 91: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 104ms/step - loss: 1.3780 - accuracy: 0.4980 - val_loss: 1.6739 - val_accuracy: 0.4688
Epoch 92/105
64/64 [==============================] - ETA: 0s - loss: 1.3541 - accuracy: 0.4814
Epoch 92: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 102ms/step - loss: 1.3541 - accuracy: 0.4814 - val_loss: 1.5042 - val_accuracy: 0.4812
Epoch 93/105
64/64 [==============================] - ETA: 0s - loss: 1.4046 - accuracy: 0.4961
Epoch 93: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 103ms/step - loss: 1.4046 - accuracy: 0.4961 - val_loss: 1.4637 - val_accuracy: 0.5125
Epoch 94/105
64/64 [==============================] - ETA: 0s - loss: 1.4981 - accuracy: 0.4932
Epoch 94: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 102ms/step - loss: 1.4981 - accuracy: 0.4932 - val_loss: 1.4601 - val_accuracy: 0.4938
Epoch 95/105
64/64 [==============================] - ETA: 0s - loss: 1.3733 - accuracy: 0.5381
Epoch 95: val_accuracy did not improve from 0.52500
64/64 [==============================] - 7s 104ms/step - loss: 1.3733 - accuracy: 0.5381 - val_loss: 1.4400 - val_accuracy: 0.5063
Epoch 96/105
64/64 [==============================] - ETA: 0s - loss: 1.3952 - accuracy: 0.4912
Epoch 96: val_accuracy improved from 0.52500 to 0.55000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_1.h5
64/64 [==============================] - 31s 490ms/step - loss: 1.3952 - accuracy: 0.4912 - val_loss: 1.3826 - val_accuracy: 0.5500
Epoch 97/105
64/64 [==============================] - ETA: 0s - loss: 1.3861 - accuracy: 0.5107
Epoch 97: val_accuracy did not improve from 0.55000
64/64 [==============================] - 7s 102ms/step - loss: 1.3861 - accuracy: 0.5107 - val_loss: 1.6908 - val_accuracy: 0.4750
Epoch 98/105
64/64 [==============================] - ETA: 0s - loss: 1.4213 - accuracy: 0.4863
Epoch 98: val_accuracy did not improve from 0.55000
64/64 [==============================] - 7s 103ms/step - loss: 1.4213 - accuracy: 0.4863 - val_loss: 1.4225 - val_accuracy: 0.5063
Epoch 99/105
64/64 [==============================] - ETA: 0s - loss: 1.5202 - accuracy: 0.4785
Epoch 99: val_accuracy did not improve from 0.55000
64/64 [==============================] - 7s 104ms/step - loss: 1.5202 - accuracy: 0.4785 - val_loss: 1.5904 - val_accuracy: 0.4688
Epoch 100/105
64/64 [==============================] - ETA: 0s - loss: 1.4948 - accuracy: 0.4775
Epoch 100: val_accuracy did not improve from 0.55000
64/64 [==============================] - 7s 104ms/step - loss: 1.4948 - accuracy: 0.4775 - val_loss: 1.3256 - val_accuracy: 0.5000
Epoch 101/105
64/64 [==============================] - ETA: 0s - loss: 1.4358 - accuracy: 0.4775
Epoch 101: val_accuracy did not improve from 0.55000
64/64 [==============================] - 7s 104ms/step - loss: 1.4358 - accuracy: 0.4775 - val_loss: 1.3496 - val_accuracy: 0.4875
Epoch 102/105
64/64 [==============================] - ETA: 0s - loss: 1.3475 - accuracy: 0.5166
Epoch 102: val_accuracy did not improve from 0.55000
64/64 [==============================] - 7s 103ms/step - loss: 1.3475 - accuracy: 0.5166 - val_loss: 1.5244 - val_accuracy: 0.4437
Epoch 103/105
64/64 [==============================] - ETA: 0s - loss: 1.4159 - accuracy: 0.4814
Epoch 103: val_accuracy did not improve from 0.55000
64/64 [==============================] - 7s 103ms/step - loss: 1.4159 - accuracy: 0.4814 - val_loss: 1.5275 - val_accuracy: 0.4875
Epoch 104/105
64/64 [==============================] - ETA: 0s - loss: 1.4370 - accuracy: 0.4795
Epoch 104: val_accuracy did not improve from 0.55000
64/64 [==============================] - 7s 103ms/step - loss: 1.4370 - accuracy: 0.4795 - val_loss: 1.5317 - val_accuracy: 0.4437
Epoch 105/105
64/64 [==============================] - ETA: 0s - loss: 1.4831 - accuracy: 0.4590
Epoch 105: val_accuracy did not improve from 0.55000
64/64 [==============================] - 7s 103ms/step - loss: 1.4831 - accuracy: 0.4590 - val_loss: 1.4852 - val_accuracy: 0.4938
********* Training time: 1185.859375 s.
*****************
* Model Summary *
*****************
Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 224, 224, 3)] 0
block1_conv1 (Conv2D) (None, 224, 224, 64) 1792
block1_conv2 (Conv2D) (None, 224, 224, 64) 36928
block1_pool (MaxPooling2D) (None, 112, 112, 64) 0
block2_conv1 (Conv2D) (None, 112, 112, 128) 73856
block2_conv2 (Conv2D) (None, 112, 112, 128) 147584
block2_pool (MaxPooling2D) (None, 56, 56, 128) 0
block3_conv1 (Conv2D) (None, 56, 56, 256) 295168
block3_conv2 (Conv2D) (None, 56, 56, 256) 590080
block3_conv3 (Conv2D) (None, 56, 56, 256) 590080
block3_pool (MaxPooling2D) (None, 28, 28, 256) 0
block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160
block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808
block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808
block4_pool (MaxPooling2D) (None, 14, 14, 512) 0
block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808
block5_pool (MaxPooling2D) (None, 7, 7, 512) 0
flatten (Flatten) (None, 25088) 0
fc1 (Dense) (None, 4096) 102764544
fc2 (Dense) (None, 4096) 16781312
dense (Dense) (None, 12) 49164
=================================================================
Total params: 134,309,708
Trainable params: 119,595,020
Non-trainable params: 14,714,688
_________________________________________________________________
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 3600 validated image filenames belonging to 12 classes.
225/225 [==============================] - 20s 87ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.5966 0.9311 0.7273 305
anarrhichomenum 0.8261 0.5311 0.6465 322
brevantherum 0.5946 0.1654 0.2588 266
dulcamara 0.4483 0.1254 0.1960 311
herposolanum 0.3778 0.4265 0.4007 279
holophylla 0.4006 0.4858 0.4391 282
lasiocarpa 0.9516 0.7540 0.8414 313
melongena 0.7626 0.6242 0.6865 314
micracantha 0.4686 0.3746 0.4164 299
petota 0.2255 0.8600 0.3573 300
solanum 0.2914 0.1678 0.2129 304
torva 0.5833 0.0689 0.1232 305
accuracy 0.4633 3600
macro avg 0.5439 0.4596 0.4422 3600
weighted avg 0.5488 0.4633 0.4468 3600
********************
* Confusion Matrix *
********************
************************************** * Train/Val Accuracy and Loss graphs * **************************************
*****************************
* Started at 1211.296875... *
*****************************
Found 14400 validated image filenames belonging to 12 classes.
Found 3600 validated image filenames belonging to 12 classes.
Epoch 1/105
64/64 [==============================] - ETA: 0s - loss: 16.3871 - accuracy: 0.1191
Epoch 1: val_accuracy improved from -inf to 0.18125, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_2.h5
64/64 [==============================] - 28s 438ms/step - loss: 16.3871 - accuracy: 0.1191 - val_loss: 2.2480 - val_accuracy: 0.1813
Epoch 2/105
64/64 [==============================] - ETA: 0s - loss: 2.1493 - accuracy: 0.2236
Epoch 2: val_accuracy improved from 0.18125 to 0.25000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_2.h5
64/64 [==============================] - 27s 418ms/step - loss: 2.1493 - accuracy: 0.2236 - val_loss: 2.0837 - val_accuracy: 0.2500
Epoch 3/105
64/64 [==============================] - ETA: 0s - loss: 2.0542 - accuracy: 0.2520
Epoch 3: val_accuracy did not improve from 0.25000
64/64 [==============================] - 7s 100ms/step - loss: 2.0542 - accuracy: 0.2520 - val_loss: 2.2026 - val_accuracy: 0.2125
Epoch 4/105
64/64 [==============================] - ETA: 0s - loss: 1.9634 - accuracy: 0.2900
Epoch 4: val_accuracy improved from 0.25000 to 0.26875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_2.h5
64/64 [==============================] - 27s 431ms/step - loss: 1.9634 - accuracy: 0.2900 - val_loss: 1.9547 - val_accuracy: 0.2688
Epoch 5/105
64/64 [==============================] - ETA: 0s - loss: 1.8545 - accuracy: 0.3164
Epoch 5: val_accuracy did not improve from 0.26875
64/64 [==============================] - 7s 100ms/step - loss: 1.8545 - accuracy: 0.3164 - val_loss: 2.3121 - val_accuracy: 0.2500
Epoch 6/105
64/64 [==============================] - ETA: 0s - loss: 1.9023 - accuracy: 0.3193
Epoch 6: val_accuracy did not improve from 0.26875
64/64 [==============================] - 7s 103ms/step - loss: 1.9023 - accuracy: 0.3193 - val_loss: 2.0366 - val_accuracy: 0.2562
Epoch 7/105
64/64 [==============================] - ETA: 0s - loss: 1.8571 - accuracy: 0.3213
Epoch 7: val_accuracy did not improve from 0.26875
64/64 [==============================] - 7s 103ms/step - loss: 1.8571 - accuracy: 0.3213 - val_loss: 1.9992 - val_accuracy: 0.2625
Epoch 8/105
64/64 [==============================] - ETA: 0s - loss: 1.8012 - accuracy: 0.3320
Epoch 8: val_accuracy improved from 0.26875 to 0.30000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_2.h5
64/64 [==============================] - 27s 433ms/step - loss: 1.8012 - accuracy: 0.3320 - val_loss: 1.8987 - val_accuracy: 0.3000
Epoch 9/105
64/64 [==============================] - ETA: 0s - loss: 1.7224 - accuracy: 0.3896
Epoch 9: val_accuracy improved from 0.30000 to 0.32500, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_2.h5
64/64 [==============================] - 28s 437ms/step - loss: 1.7224 - accuracy: 0.3896 - val_loss: 1.8684 - val_accuracy: 0.3250
Epoch 10/105
64/64 [==============================] - ETA: 0s - loss: 1.7510 - accuracy: 0.3613
Epoch 10: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 103ms/step - loss: 1.7510 - accuracy: 0.3613 - val_loss: 2.0537 - val_accuracy: 0.2937
Epoch 11/105
64/64 [==============================] - ETA: 0s - loss: 1.7817 - accuracy: 0.3623
Epoch 11: val_accuracy improved from 0.32500 to 0.36875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_2.h5
64/64 [==============================] - 29s 459ms/step - loss: 1.7817 - accuracy: 0.3623 - val_loss: 1.9210 - val_accuracy: 0.3688
Epoch 12/105
64/64 [==============================] - ETA: 0s - loss: 1.7672 - accuracy: 0.3535
Epoch 12: val_accuracy improved from 0.36875 to 0.40625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_2.h5
64/64 [==============================] - 28s 432ms/step - loss: 1.7672 - accuracy: 0.3535 - val_loss: 1.7583 - val_accuracy: 0.4062
Epoch 13/105
64/64 [==============================] - ETA: 0s - loss: 1.6562 - accuracy: 0.3906
Epoch 13: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 102ms/step - loss: 1.6562 - accuracy: 0.3906 - val_loss: 1.7859 - val_accuracy: 0.3750
Epoch 14/105
64/64 [==============================] - ETA: 0s - loss: 1.7044 - accuracy: 0.3770
Epoch 14: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 103ms/step - loss: 1.7044 - accuracy: 0.3770 - val_loss: 2.0178 - val_accuracy: 0.3250
Epoch 15/105
64/64 [==============================] - ETA: 0s - loss: 1.7101 - accuracy: 0.3691
Epoch 15: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 102ms/step - loss: 1.7101 - accuracy: 0.3691 - val_loss: 1.7343 - val_accuracy: 0.3187
Epoch 16/105
64/64 [==============================] - ETA: 0s - loss: 1.7359 - accuracy: 0.3789
Epoch 16: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 104ms/step - loss: 1.7359 - accuracy: 0.3789 - val_loss: 2.1638 - val_accuracy: 0.3000
Epoch 17/105
64/64 [==============================] - ETA: 0s - loss: 1.7549 - accuracy: 0.3711
Epoch 17: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 103ms/step - loss: 1.7549 - accuracy: 0.3711 - val_loss: 1.8435 - val_accuracy: 0.2937
Epoch 18/105
64/64 [==============================] - ETA: 0s - loss: 1.6975 - accuracy: 0.3770
Epoch 18: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 102ms/step - loss: 1.6975 - accuracy: 0.3770 - val_loss: 1.8149 - val_accuracy: 0.3375
Epoch 19/105
64/64 [==============================] - ETA: 0s - loss: 1.6036 - accuracy: 0.4307
Epoch 19: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 102ms/step - loss: 1.6036 - accuracy: 0.4307 - val_loss: 1.8716 - val_accuracy: 0.3688
Epoch 20/105
64/64 [==============================] - ETA: 0s - loss: 1.6699 - accuracy: 0.4023
Epoch 20: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 102ms/step - loss: 1.6699 - accuracy: 0.4023 - val_loss: 2.0249 - val_accuracy: 0.3438
Epoch 21/105
64/64 [==============================] - ETA: 0s - loss: 1.6352 - accuracy: 0.4033
Epoch 21: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 102ms/step - loss: 1.6352 - accuracy: 0.4033 - val_loss: 1.7445 - val_accuracy: 0.3750
Epoch 22/105
64/64 [==============================] - ETA: 0s - loss: 1.6469 - accuracy: 0.4102
Epoch 22: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 101ms/step - loss: 1.6469 - accuracy: 0.4102 - val_loss: 1.8729 - val_accuracy: 0.3500
Epoch 23/105
64/64 [==============================] - ETA: 0s - loss: 1.6306 - accuracy: 0.4023
Epoch 23: val_accuracy did not improve from 0.40625
64/64 [==============================] - 6s 101ms/step - loss: 1.6306 - accuracy: 0.4023 - val_loss: 1.6248 - val_accuracy: 0.4062
Epoch 24/105
64/64 [==============================] - ETA: 0s - loss: 1.5786 - accuracy: 0.4189
Epoch 24: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 102ms/step - loss: 1.5786 - accuracy: 0.4189 - val_loss: 1.8130 - val_accuracy: 0.4062
Epoch 25/105
64/64 [==============================] - ETA: 0s - loss: 1.5609 - accuracy: 0.4453
Epoch 25: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 102ms/step - loss: 1.5609 - accuracy: 0.4453 - val_loss: 2.0323 - val_accuracy: 0.3187
Epoch 26/105
64/64 [==============================] - ETA: 0s - loss: 1.6195 - accuracy: 0.4131
Epoch 26: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 102ms/step - loss: 1.6195 - accuracy: 0.4131 - val_loss: 1.5706 - val_accuracy: 0.3875
Epoch 27/105
64/64 [==============================] - ETA: 0s - loss: 1.5749 - accuracy: 0.4336
Epoch 27: val_accuracy did not improve from 0.40625
64/64 [==============================] - 6s 101ms/step - loss: 1.5749 - accuracy: 0.4336 - val_loss: 1.9523 - val_accuracy: 0.3625
Epoch 28/105
64/64 [==============================] - ETA: 0s - loss: 1.6169 - accuracy: 0.4355
Epoch 28: val_accuracy improved from 0.40625 to 0.43750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_2.h5
64/64 [==============================] - 28s 444ms/step - loss: 1.6169 - accuracy: 0.4355 - val_loss: 1.6352 - val_accuracy: 0.4375
Epoch 29/105
64/64 [==============================] - ETA: 0s - loss: 1.5686 - accuracy: 0.4277
Epoch 29: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 100ms/step - loss: 1.5686 - accuracy: 0.4277 - val_loss: 1.6465 - val_accuracy: 0.4313
Epoch 30/105
64/64 [==============================] - ETA: 0s - loss: 1.4912 - accuracy: 0.4766
Epoch 30: val_accuracy improved from 0.43750 to 0.46250, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_2.h5
64/64 [==============================] - 29s 454ms/step - loss: 1.4912 - accuracy: 0.4766 - val_loss: 1.4947 - val_accuracy: 0.4625
Epoch 31/105
64/64 [==============================] - ETA: 0s - loss: 1.5191 - accuracy: 0.4492
Epoch 31: val_accuracy did not improve from 0.46250
64/64 [==============================] - 7s 101ms/step - loss: 1.5191 - accuracy: 0.4492 - val_loss: 1.7154 - val_accuracy: 0.4187
Epoch 32/105
64/64 [==============================] - ETA: 0s - loss: 1.5092 - accuracy: 0.4639
Epoch 32: val_accuracy did not improve from 0.46250
64/64 [==============================] - 7s 103ms/step - loss: 1.5092 - accuracy: 0.4639 - val_loss: 1.5895 - val_accuracy: 0.4313
Epoch 33/105
64/64 [==============================] - ETA: 0s - loss: 1.5359 - accuracy: 0.4492
Epoch 33: val_accuracy did not improve from 0.46250
64/64 [==============================] - 7s 102ms/step - loss: 1.5359 - accuracy: 0.4492 - val_loss: 1.5994 - val_accuracy: 0.4062
Epoch 34/105
64/64 [==============================] - ETA: 0s - loss: 1.5372 - accuracy: 0.4551
Epoch 34: val_accuracy did not improve from 0.46250
64/64 [==============================] - 7s 103ms/step - loss: 1.5372 - accuracy: 0.4551 - val_loss: 1.8490 - val_accuracy: 0.3438
Epoch 35/105
64/64 [==============================] - ETA: 0s - loss: 1.5305 - accuracy: 0.4531
Epoch 35: val_accuracy did not improve from 0.46250
64/64 [==============================] - 7s 102ms/step - loss: 1.5305 - accuracy: 0.4531 - val_loss: 1.5525 - val_accuracy: 0.4375
Epoch 36/105
64/64 [==============================] - ETA: 0s - loss: 1.5019 - accuracy: 0.4541
Epoch 36: val_accuracy did not improve from 0.46250
64/64 [==============================] - 7s 103ms/step - loss: 1.5019 - accuracy: 0.4541 - val_loss: 1.9044 - val_accuracy: 0.4187
Epoch 37/105
64/64 [==============================] - ETA: 0s - loss: 1.5379 - accuracy: 0.4600
Epoch 37: val_accuracy did not improve from 0.46250
64/64 [==============================] - 7s 102ms/step - loss: 1.5379 - accuracy: 0.4600 - val_loss: 1.5478 - val_accuracy: 0.4500
Epoch 38/105
64/64 [==============================] - ETA: 0s - loss: 1.5357 - accuracy: 0.4619
Epoch 38: val_accuracy improved from 0.46250 to 0.50000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_2.h5
64/64 [==============================] - 29s 456ms/step - loss: 1.5357 - accuracy: 0.4619 - val_loss: 1.5461 - val_accuracy: 0.5000
Epoch 39/105
64/64 [==============================] - ETA: 0s - loss: 1.4690 - accuracy: 0.4600
Epoch 39: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 101ms/step - loss: 1.4690 - accuracy: 0.4600 - val_loss: 1.6265 - val_accuracy: 0.4125
Epoch 40/105
64/64 [==============================] - ETA: 0s - loss: 1.4988 - accuracy: 0.4805
Epoch 40: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 103ms/step - loss: 1.4988 - accuracy: 0.4805 - val_loss: 1.5442 - val_accuracy: 0.4812
Epoch 41/105
64/64 [==============================] - ETA: 0s - loss: 1.4779 - accuracy: 0.4785
Epoch 41: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 102ms/step - loss: 1.4779 - accuracy: 0.4785 - val_loss: 1.6920 - val_accuracy: 0.4125
Epoch 42/105
64/64 [==============================] - ETA: 0s - loss: 1.4609 - accuracy: 0.4912
Epoch 42: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 103ms/step - loss: 1.4609 - accuracy: 0.4912 - val_loss: 1.5677 - val_accuracy: 0.4375
Epoch 43/105
64/64 [==============================] - ETA: 0s - loss: 1.5125 - accuracy: 0.4639
Epoch 43: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 102ms/step - loss: 1.5125 - accuracy: 0.4639 - val_loss: 1.5863 - val_accuracy: 0.4250
Epoch 44/105
64/64 [==============================] - ETA: 0s - loss: 1.6024 - accuracy: 0.4238
Epoch 44: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 102ms/step - loss: 1.6024 - accuracy: 0.4238 - val_loss: 1.8969 - val_accuracy: 0.3688
Epoch 45/105
64/64 [==============================] - ETA: 0s - loss: 1.4594 - accuracy: 0.4795
Epoch 45: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 103ms/step - loss: 1.4594 - accuracy: 0.4795 - val_loss: 1.5980 - val_accuracy: 0.4187
Epoch 46/105
64/64 [==============================] - ETA: 0s - loss: 1.4983 - accuracy: 0.4531
Epoch 46: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 102ms/step - loss: 1.4983 - accuracy: 0.4531 - val_loss: 1.6318 - val_accuracy: 0.4313
Epoch 47/105
64/64 [==============================] - ETA: 0s - loss: 1.5174 - accuracy: 0.4746
Epoch 47: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 102ms/step - loss: 1.5174 - accuracy: 0.4746 - val_loss: 1.6752 - val_accuracy: 0.4313
Epoch 48/105
64/64 [==============================] - ETA: 0s - loss: 1.4342 - accuracy: 0.5156
Epoch 48: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 103ms/step - loss: 1.4342 - accuracy: 0.5156 - val_loss: 1.4710 - val_accuracy: 0.4938
Epoch 49/105
64/64 [==============================] - ETA: 0s - loss: 1.4328 - accuracy: 0.4795
Epoch 49: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 102ms/step - loss: 1.4328 - accuracy: 0.4795 - val_loss: 1.5878 - val_accuracy: 0.4563
Epoch 50/105
64/64 [==============================] - ETA: 0s - loss: 1.4426 - accuracy: 0.5039
Epoch 50: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 103ms/step - loss: 1.4426 - accuracy: 0.5039 - val_loss: 1.8941 - val_accuracy: 0.4000
Epoch 51/105
64/64 [==============================] - ETA: 0s - loss: 1.4973 - accuracy: 0.4834
Epoch 51: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 103ms/step - loss: 1.4973 - accuracy: 0.4834 - val_loss: 1.4547 - val_accuracy: 0.4688
Epoch 52/105
64/64 [==============================] - ETA: 0s - loss: 1.3864 - accuracy: 0.5225
Epoch 52: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 103ms/step - loss: 1.3864 - accuracy: 0.5225 - val_loss: 1.6489 - val_accuracy: 0.4375
Epoch 53/105
64/64 [==============================] - ETA: 0s - loss: 1.4612 - accuracy: 0.4951
Epoch 53: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 102ms/step - loss: 1.4612 - accuracy: 0.4951 - val_loss: 1.5791 - val_accuracy: 0.4688
Epoch 54/105
64/64 [==============================] - ETA: 0s - loss: 1.4513 - accuracy: 0.4893
Epoch 54: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 103ms/step - loss: 1.4513 - accuracy: 0.4893 - val_loss: 1.5249 - val_accuracy: 0.5000
Epoch 55/105
64/64 [==============================] - ETA: 0s - loss: 1.4383 - accuracy: 0.4902
Epoch 55: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 102ms/step - loss: 1.4383 - accuracy: 0.4902 - val_loss: 1.4272 - val_accuracy: 0.4875
Epoch 56/105
64/64 [==============================] - ETA: 0s - loss: 1.4596 - accuracy: 0.4844
Epoch 56: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 103ms/step - loss: 1.4596 - accuracy: 0.4844 - val_loss: 1.5880 - val_accuracy: 0.4375
Epoch 57/105
64/64 [==============================] - ETA: 0s - loss: 1.3679 - accuracy: 0.5186
Epoch 57: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 103ms/step - loss: 1.3679 - accuracy: 0.5186 - val_loss: 1.4927 - val_accuracy: 0.4750
Epoch 58/105
64/64 [==============================] - ETA: 0s - loss: 1.4127 - accuracy: 0.5049
Epoch 58: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 102ms/step - loss: 1.4127 - accuracy: 0.5049 - val_loss: 1.4844 - val_accuracy: 0.4812
Epoch 59/105
64/64 [==============================] - ETA: 0s - loss: 1.4553 - accuracy: 0.4756
Epoch 59: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 102ms/step - loss: 1.4553 - accuracy: 0.4756 - val_loss: 1.6643 - val_accuracy: 0.4062
Epoch 60/105
64/64 [==============================] - ETA: 0s - loss: 1.3804 - accuracy: 0.5146
Epoch 60: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 103ms/step - loss: 1.3804 - accuracy: 0.5146 - val_loss: 1.6462 - val_accuracy: 0.4250
Epoch 61/105
64/64 [==============================] - ETA: 0s - loss: 1.3940 - accuracy: 0.5176
Epoch 61: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 102ms/step - loss: 1.3940 - accuracy: 0.5176 - val_loss: 1.5588 - val_accuracy: 0.4688
Epoch 62/105
64/64 [==============================] - ETA: 0s - loss: 1.4338 - accuracy: 0.5029
Epoch 62: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 103ms/step - loss: 1.4338 - accuracy: 0.5029 - val_loss: 1.4736 - val_accuracy: 0.4625
Epoch 63/105
64/64 [==============================] - ETA: 0s - loss: 1.3177 - accuracy: 0.5371
Epoch 63: val_accuracy did not improve from 0.50000
64/64 [==============================] - 7s 102ms/step - loss: 1.3177 - accuracy: 0.5371 - val_loss: 1.6220 - val_accuracy: 0.4625
Epoch 64/105
64/64 [==============================] - ETA: 0s - loss: 1.3748 - accuracy: 0.4990
Epoch 64: val_accuracy improved from 0.50000 to 0.50625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_2.h5
64/64 [==============================] - 31s 482ms/step - loss: 1.3748 - accuracy: 0.4990 - val_loss: 1.4365 - val_accuracy: 0.5063
Epoch 65/105
64/64 [==============================] - ETA: 0s - loss: 1.3946 - accuracy: 0.5059
Epoch 65: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 101ms/step - loss: 1.3946 - accuracy: 0.5059 - val_loss: 1.5580 - val_accuracy: 0.4500
Epoch 66/105
64/64 [==============================] - ETA: 0s - loss: 1.4763 - accuracy: 0.4746
Epoch 66: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 104ms/step - loss: 1.4763 - accuracy: 0.4746 - val_loss: 1.4200 - val_accuracy: 0.4563
Epoch 67/105
64/64 [==============================] - ETA: 0s - loss: 1.4183 - accuracy: 0.4912
Epoch 67: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 102ms/step - loss: 1.4183 - accuracy: 0.4912 - val_loss: 1.5226 - val_accuracy: 0.4625
Epoch 68/105
64/64 [==============================] - ETA: 0s - loss: 1.4139 - accuracy: 0.4980
Epoch 68: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 103ms/step - loss: 1.4139 - accuracy: 0.4980 - val_loss: 2.0720 - val_accuracy: 0.3625
Epoch 69/105
64/64 [==============================] - ETA: 0s - loss: 1.4706 - accuracy: 0.4883
Epoch 69: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 103ms/step - loss: 1.4706 - accuracy: 0.4883 - val_loss: 1.5608 - val_accuracy: 0.4812
Epoch 70/105
64/64 [==============================] - ETA: 0s - loss: 1.4756 - accuracy: 0.4941
Epoch 70: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 102ms/step - loss: 1.4756 - accuracy: 0.4941 - val_loss: 1.4504 - val_accuracy: 0.4688
Epoch 71/105
64/64 [==============================] - ETA: 0s - loss: 1.4369 - accuracy: 0.4971
Epoch 71: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 103ms/step - loss: 1.4369 - accuracy: 0.4971 - val_loss: 1.4509 - val_accuracy: 0.4812
Epoch 72/105
64/64 [==============================] - ETA: 0s - loss: 1.3476 - accuracy: 0.5381
Epoch 72: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 102ms/step - loss: 1.3476 - accuracy: 0.5381 - val_loss: 1.6290 - val_accuracy: 0.4812
Epoch 73/105
64/64 [==============================] - ETA: 0s - loss: 1.3081 - accuracy: 0.5459
Epoch 73: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 102ms/step - loss: 1.3081 - accuracy: 0.5459 - val_loss: 1.5180 - val_accuracy: 0.5000
Epoch 74/105
64/64 [==============================] - ETA: 0s - loss: 1.4072 - accuracy: 0.5010
Epoch 74: val_accuracy improved from 0.50625 to 0.51250, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_2.h5
64/64 [==============================] - 30s 472ms/step - loss: 1.4072 - accuracy: 0.5010 - val_loss: 1.4425 - val_accuracy: 0.5125
Epoch 75/105
64/64 [==============================] - ETA: 0s - loss: 1.3383 - accuracy: 0.5225
Epoch 75: val_accuracy did not improve from 0.51250
64/64 [==============================] - 7s 102ms/step - loss: 1.3383 - accuracy: 0.5225 - val_loss: 1.5875 - val_accuracy: 0.4500
Epoch 76/105
64/64 [==============================] - ETA: 0s - loss: 1.3516 - accuracy: 0.5293
Epoch 76: val_accuracy did not improve from 0.51250
64/64 [==============================] - 7s 103ms/step - loss: 1.3516 - accuracy: 0.5293 - val_loss: 1.4397 - val_accuracy: 0.5063
Epoch 77/105
64/64 [==============================] - ETA: 0s - loss: 1.3915 - accuracy: 0.5244
Epoch 77: val_accuracy did not improve from 0.51250
64/64 [==============================] - 7s 102ms/step - loss: 1.3915 - accuracy: 0.5244 - val_loss: 1.6090 - val_accuracy: 0.4375
Epoch 78/105
64/64 [==============================] - ETA: 0s - loss: 1.2901 - accuracy: 0.5430
Epoch 78: val_accuracy did not improve from 0.51250
64/64 [==============================] - 7s 102ms/step - loss: 1.2901 - accuracy: 0.5430 - val_loss: 1.4321 - val_accuracy: 0.4500
Epoch 79/105
64/64 [==============================] - ETA: 0s - loss: 1.3121 - accuracy: 0.5322
Epoch 79: val_accuracy did not improve from 0.51250
64/64 [==============================] - 7s 103ms/step - loss: 1.3121 - accuracy: 0.5322 - val_loss: 1.5119 - val_accuracy: 0.4875
Epoch 80/105
64/64 [==============================] - ETA: 0s - loss: 1.3120 - accuracy: 0.5352
Epoch 80: val_accuracy did not improve from 0.51250
64/64 [==============================] - 7s 103ms/step - loss: 1.3120 - accuracy: 0.5352 - val_loss: 1.6427 - val_accuracy: 0.4625
Epoch 81/105
64/64 [==============================] - ETA: 0s - loss: 1.3788 - accuracy: 0.5068
Epoch 81: val_accuracy did not improve from 0.51250
64/64 [==============================] - 7s 102ms/step - loss: 1.3788 - accuracy: 0.5068 - val_loss: 1.5633 - val_accuracy: 0.4500
Epoch 82/105
64/64 [==============================] - ETA: 0s - loss: 1.3722 - accuracy: 0.5234
Epoch 82: val_accuracy did not improve from 0.51250
64/64 [==============================] - 7s 102ms/step - loss: 1.3722 - accuracy: 0.5234 - val_loss: 1.3944 - val_accuracy: 0.5125
Epoch 83/105
64/64 [==============================] - ETA: 0s - loss: 1.3339 - accuracy: 0.5312
Epoch 83: val_accuracy did not improve from 0.51250
64/64 [==============================] - 7s 102ms/step - loss: 1.3339 - accuracy: 0.5312 - val_loss: 1.5706 - val_accuracy: 0.4375
Epoch 84/105
64/64 [==============================] - ETA: 0s - loss: 1.3736 - accuracy: 0.5225
Epoch 84: val_accuracy did not improve from 0.51250
64/64 [==============================] - 7s 103ms/step - loss: 1.3736 - accuracy: 0.5225 - val_loss: 1.6020 - val_accuracy: 0.4375
Epoch 85/105
64/64 [==============================] - ETA: 0s - loss: 1.4229 - accuracy: 0.5127
Epoch 85: val_accuracy improved from 0.51250 to 0.54375, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_2.h5
64/64 [==============================] - 29s 456ms/step - loss: 1.4229 - accuracy: 0.5127 - val_loss: 1.2757 - val_accuracy: 0.5437
Epoch 86/105
64/64 [==============================] - ETA: 0s - loss: 1.3488 - accuracy: 0.5000
Epoch 86: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 102ms/step - loss: 1.3488 - accuracy: 0.5000 - val_loss: 1.3399 - val_accuracy: 0.5125
Epoch 87/105
64/64 [==============================] - ETA: 0s - loss: 1.3441 - accuracy: 0.5361
Epoch 87: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 104ms/step - loss: 1.3441 - accuracy: 0.5361 - val_loss: 1.4370 - val_accuracy: 0.4875
Epoch 88/105
64/64 [==============================] - ETA: 0s - loss: 1.3096 - accuracy: 0.5381
Epoch 88: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 102ms/step - loss: 1.3096 - accuracy: 0.5381 - val_loss: 1.4781 - val_accuracy: 0.4688
Epoch 89/105
64/64 [==============================] - ETA: 0s - loss: 1.3485 - accuracy: 0.5410
Epoch 89: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 103ms/step - loss: 1.3485 - accuracy: 0.5410 - val_loss: 1.4840 - val_accuracy: 0.4812
Epoch 90/105
64/64 [==============================] - ETA: 0s - loss: 1.3246 - accuracy: 0.5205
Epoch 90: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 104ms/step - loss: 1.3246 - accuracy: 0.5205 - val_loss: 1.3527 - val_accuracy: 0.4500
Epoch 91/105
64/64 [==============================] - ETA: 0s - loss: 1.3013 - accuracy: 0.5498
Epoch 91: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 104ms/step - loss: 1.3013 - accuracy: 0.5498 - val_loss: 1.4252 - val_accuracy: 0.5125
Epoch 92/105
64/64 [==============================] - ETA: 0s - loss: 1.2956 - accuracy: 0.5283
Epoch 92: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 102ms/step - loss: 1.2956 - accuracy: 0.5283 - val_loss: 1.5071 - val_accuracy: 0.5000
Epoch 93/105
64/64 [==============================] - ETA: 0s - loss: 1.3344 - accuracy: 0.5234
Epoch 93: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 103ms/step - loss: 1.3344 - accuracy: 0.5234 - val_loss: 1.4249 - val_accuracy: 0.4938
Epoch 94/105
64/64 [==============================] - ETA: 0s - loss: 1.3531 - accuracy: 0.5205
Epoch 94: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 104ms/step - loss: 1.3531 - accuracy: 0.5205 - val_loss: 1.6195 - val_accuracy: 0.4563
Epoch 95/105
64/64 [==============================] - ETA: 0s - loss: 1.3555 - accuracy: 0.5283
Epoch 95: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 103ms/step - loss: 1.3555 - accuracy: 0.5283 - val_loss: 1.3269 - val_accuracy: 0.5250
Epoch 96/105
64/64 [==============================] - ETA: 0s - loss: 1.3656 - accuracy: 0.5381
Epoch 96: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 104ms/step - loss: 1.3656 - accuracy: 0.5381 - val_loss: 1.8258 - val_accuracy: 0.4375
Epoch 97/105
64/64 [==============================] - ETA: 0s - loss: 1.3899 - accuracy: 0.5176
Epoch 97: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 104ms/step - loss: 1.3899 - accuracy: 0.5176 - val_loss: 1.4017 - val_accuracy: 0.4625
Epoch 98/105
64/64 [==============================] - ETA: 0s - loss: 1.3450 - accuracy: 0.5371
Epoch 98: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 103ms/step - loss: 1.3450 - accuracy: 0.5371 - val_loss: 1.2905 - val_accuracy: 0.5375
Epoch 99/105
64/64 [==============================] - ETA: 0s - loss: 1.2814 - accuracy: 0.5479
Epoch 99: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 103ms/step - loss: 1.2814 - accuracy: 0.5479 - val_loss: 1.7082 - val_accuracy: 0.4625
Epoch 100/105
64/64 [==============================] - ETA: 0s - loss: 1.2784 - accuracy: 0.5479
Epoch 100: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 104ms/step - loss: 1.2784 - accuracy: 0.5479 - val_loss: 1.4784 - val_accuracy: 0.4875
Epoch 101/105
64/64 [==============================] - ETA: 0s - loss: 1.2793 - accuracy: 0.5596
Epoch 101: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 103ms/step - loss: 1.2793 - accuracy: 0.5596 - val_loss: 1.2870 - val_accuracy: 0.5250
Epoch 102/105
64/64 [==============================] - ETA: 0s - loss: 1.3453 - accuracy: 0.5410
Epoch 102: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 103ms/step - loss: 1.3453 - accuracy: 0.5410 - val_loss: 1.3056 - val_accuracy: 0.5250
Epoch 103/105
64/64 [==============================] - ETA: 0s - loss: 1.3055 - accuracy: 0.5410
Epoch 103: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 103ms/step - loss: 1.3055 - accuracy: 0.5410 - val_loss: 1.6197 - val_accuracy: 0.4750
Epoch 104/105
64/64 [==============================] - ETA: 0s - loss: 1.2452 - accuracy: 0.5439
Epoch 104: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 103ms/step - loss: 1.2452 - accuracy: 0.5439 - val_loss: 1.3508 - val_accuracy: 0.5250
Epoch 105/105
64/64 [==============================] - ETA: 0s - loss: 1.3001 - accuracy: 0.5625
Epoch 105: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 104ms/step - loss: 1.3001 - accuracy: 0.5625 - val_loss: 1.5886 - val_accuracy: 0.4250
********* Training time: 1149.75 s.
*****************
* Model Summary *
*****************
Model: "model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, 224, 224, 3)] 0
block1_conv1 (Conv2D) (None, 224, 224, 64) 1792
block1_conv2 (Conv2D) (None, 224, 224, 64) 36928
block1_pool (MaxPooling2D) (None, 112, 112, 64) 0
block2_conv1 (Conv2D) (None, 112, 112, 128) 73856
block2_conv2 (Conv2D) (None, 112, 112, 128) 147584
block2_pool (MaxPooling2D) (None, 56, 56, 128) 0
block3_conv1 (Conv2D) (None, 56, 56, 256) 295168
block3_conv2 (Conv2D) (None, 56, 56, 256) 590080
block3_conv3 (Conv2D) (None, 56, 56, 256) 590080
block3_pool (MaxPooling2D) (None, 28, 28, 256) 0
block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160
block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808
block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808
block4_pool (MaxPooling2D) (None, 14, 14, 512) 0
block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808
block5_pool (MaxPooling2D) (None, 7, 7, 512) 0
flatten (Flatten) (None, 25088) 0
fc1 (Dense) (None, 4096) 102764544
fc2 (Dense) (None, 4096) 16781312
dense_1 (Dense) (None, 12) 49164
=================================================================
Total params: 134,309,708
Trainable params: 119,595,020
Non-trainable params: 14,714,688
_________________________________________________________________
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 3600 validated image filenames belonging to 12 classes.
225/225 [==============================] - 5s 24ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.9266 0.5921 0.7225 277
anarrhichomenum 0.4472 0.9270 0.6033 315
brevantherum 0.4957 0.3779 0.4288 307
dulcamara 0.3161 0.2054 0.2490 297
herposolanum 0.3324 0.8056 0.4706 288
holophylla 0.5339 0.4685 0.4991 286
lasiocarpa 0.7391 0.8662 0.7977 314
melongena 0.8333 0.1546 0.2609 291
micracantha 0.5922 0.3630 0.4501 292
petota 0.4645 0.5579 0.5069 328
solanum 0.3371 0.3947 0.3636 304
torva 0.4651 0.0664 0.1163 301
accuracy 0.4847 3600
macro avg 0.5403 0.4816 0.4557 3600
weighted avg 0.5373 0.4847 0.4566 3600
********************
* Confusion Matrix *
********************
************************************** * Train/Val Accuracy and Loss graphs * **************************************
*****************************
* Started at 2373.234375... *
*****************************
Found 14400 validated image filenames belonging to 12 classes.
Found 3600 validated image filenames belonging to 12 classes.
Epoch 1/105
64/64 [==============================] - ETA: 0s - loss: 13.9492 - accuracy: 0.1250
Epoch 1: val_accuracy improved from -inf to 0.17500, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_3.h5
64/64 [==============================] - 26s 410ms/step - loss: 13.9492 - accuracy: 0.1250 - val_loss: 2.2366 - val_accuracy: 0.1750
Epoch 2/105
64/64 [==============================] - ETA: 0s - loss: 2.2193 - accuracy: 0.2012
Epoch 2: val_accuracy improved from 0.17500 to 0.29375, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_3.h5
64/64 [==============================] - 24s 370ms/step - loss: 2.2193 - accuracy: 0.2012 - val_loss: 1.9254 - val_accuracy: 0.2937
Epoch 3/105
64/64 [==============================] - ETA: 0s - loss: 2.0137 - accuracy: 0.2715
Epoch 3: val_accuracy improved from 0.29375 to 0.33125, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_3.h5
64/64 [==============================] - 27s 419ms/step - loss: 2.0137 - accuracy: 0.2715 - val_loss: 1.7977 - val_accuracy: 0.3313
Epoch 4/105
64/64 [==============================] - ETA: 0s - loss: 1.9923 - accuracy: 0.2549
Epoch 4: val_accuracy improved from 0.33125 to 0.36875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_3.h5
64/64 [==============================] - 26s 402ms/step - loss: 1.9923 - accuracy: 0.2549 - val_loss: 1.6853 - val_accuracy: 0.3688
Epoch 5/105
64/64 [==============================] - ETA: 0s - loss: 1.9293 - accuracy: 0.3037
Epoch 5: val_accuracy did not improve from 0.36875
64/64 [==============================] - 7s 101ms/step - loss: 1.9293 - accuracy: 0.3037 - val_loss: 1.7096 - val_accuracy: 0.3438
Epoch 6/105
64/64 [==============================] - ETA: 0s - loss: 1.8189 - accuracy: 0.3174
Epoch 6: val_accuracy did not improve from 0.36875
64/64 [==============================] - 7s 103ms/step - loss: 1.8189 - accuracy: 0.3174 - val_loss: 1.6366 - val_accuracy: 0.3562
Epoch 7/105
64/64 [==============================] - ETA: 0s - loss: 1.9481 - accuracy: 0.2930
Epoch 7: val_accuracy did not improve from 0.36875
64/64 [==============================] - 7s 102ms/step - loss: 1.9481 - accuracy: 0.2930 - val_loss: 1.7131 - val_accuracy: 0.3688
Epoch 8/105
64/64 [==============================] - ETA: 0s - loss: 1.9226 - accuracy: 0.2998
Epoch 8: val_accuracy did not improve from 0.36875
64/64 [==============================] - 7s 103ms/step - loss: 1.9226 - accuracy: 0.2998 - val_loss: 1.8304 - val_accuracy: 0.2500
Epoch 9/105
64/64 [==============================] - ETA: 0s - loss: 1.8140 - accuracy: 0.3350
Epoch 9: val_accuracy did not improve from 0.36875
64/64 [==============================] - 7s 102ms/step - loss: 1.8140 - accuracy: 0.3350 - val_loss: 1.7349 - val_accuracy: 0.3625
Epoch 10/105
64/64 [==============================] - ETA: 0s - loss: 1.8789 - accuracy: 0.3223
Epoch 10: val_accuracy did not improve from 0.36875
64/64 [==============================] - 7s 103ms/step - loss: 1.8789 - accuracy: 0.3223 - val_loss: 1.6202 - val_accuracy: 0.3562
Epoch 11/105
64/64 [==============================] - ETA: 0s - loss: 1.7481 - accuracy: 0.3447
Epoch 11: val_accuracy did not improve from 0.36875
64/64 [==============================] - 7s 102ms/step - loss: 1.7481 - accuracy: 0.3447 - val_loss: 1.8148 - val_accuracy: 0.2688
Epoch 12/105
64/64 [==============================] - ETA: 0s - loss: 1.8045 - accuracy: 0.3398
Epoch 12: val_accuracy improved from 0.36875 to 0.41250, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_3.h5
64/64 [==============================] - 27s 420ms/step - loss: 1.8045 - accuracy: 0.3398 - val_loss: 1.5289 - val_accuracy: 0.4125
Epoch 13/105
64/64 [==============================] - ETA: 0s - loss: 1.7600 - accuracy: 0.3623
Epoch 13: val_accuracy did not improve from 0.41250
64/64 [==============================] - 7s 100ms/step - loss: 1.7600 - accuracy: 0.3623 - val_loss: 1.6841 - val_accuracy: 0.3812
Epoch 14/105
64/64 [==============================] - ETA: 0s - loss: 1.7457 - accuracy: 0.3633
Epoch 14: val_accuracy did not improve from 0.41250
64/64 [==============================] - 7s 101ms/step - loss: 1.7457 - accuracy: 0.3633 - val_loss: 1.5344 - val_accuracy: 0.3812
Epoch 15/105
64/64 [==============================] - ETA: 0s - loss: 1.7504 - accuracy: 0.3721
Epoch 15: val_accuracy did not improve from 0.41250
64/64 [==============================] - 7s 101ms/step - loss: 1.7504 - accuracy: 0.3721 - val_loss: 1.6686 - val_accuracy: 0.3750
Epoch 16/105
64/64 [==============================] - ETA: 0s - loss: 1.7211 - accuracy: 0.3535
Epoch 16: val_accuracy did not improve from 0.41250
64/64 [==============================] - 6s 100ms/step - loss: 1.7211 - accuracy: 0.3535 - val_loss: 1.6678 - val_accuracy: 0.3500
Epoch 17/105
64/64 [==============================] - ETA: 0s - loss: 1.6691 - accuracy: 0.3662
Epoch 17: val_accuracy did not improve from 0.41250
64/64 [==============================] - 7s 102ms/step - loss: 1.6691 - accuracy: 0.3662 - val_loss: 1.7364 - val_accuracy: 0.3438
Epoch 18/105
64/64 [==============================] - ETA: 0s - loss: 1.6798 - accuracy: 0.3916
Epoch 18: val_accuracy did not improve from 0.41250
64/64 [==============================] - 7s 102ms/step - loss: 1.6798 - accuracy: 0.3916 - val_loss: 1.5583 - val_accuracy: 0.4062
Epoch 19/105
64/64 [==============================] - ETA: 0s - loss: 1.7385 - accuracy: 0.3672
Epoch 19: val_accuracy improved from 0.41250 to 0.45625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_3.h5
64/64 [==============================] - 24s 371ms/step - loss: 1.7385 - accuracy: 0.3672 - val_loss: 1.4914 - val_accuracy: 0.4563
Epoch 20/105
64/64 [==============================] - ETA: 0s - loss: 1.6910 - accuracy: 0.4043
Epoch 20: val_accuracy did not improve from 0.45625
64/64 [==============================] - 7s 101ms/step - loss: 1.6910 - accuracy: 0.4043 - val_loss: 1.4718 - val_accuracy: 0.4313
Epoch 21/105
64/64 [==============================] - ETA: 0s - loss: 1.7485 - accuracy: 0.3525
Epoch 21: val_accuracy did not improve from 0.45625
64/64 [==============================] - 7s 103ms/step - loss: 1.7485 - accuracy: 0.3525 - val_loss: 1.5568 - val_accuracy: 0.3438
Epoch 22/105
64/64 [==============================] - ETA: 0s - loss: 1.6549 - accuracy: 0.3770
Epoch 22: val_accuracy did not improve from 0.45625
64/64 [==============================] - 7s 103ms/step - loss: 1.6549 - accuracy: 0.3770 - val_loss: 1.6153 - val_accuracy: 0.3438
Epoch 23/105
64/64 [==============================] - ETA: 0s - loss: 1.6582 - accuracy: 0.3955
Epoch 23: val_accuracy improved from 0.45625 to 0.46250, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_3.h5
64/64 [==============================] - 27s 419ms/step - loss: 1.6582 - accuracy: 0.3955 - val_loss: 1.3921 - val_accuracy: 0.4625
Epoch 24/105
64/64 [==============================] - ETA: 0s - loss: 1.6832 - accuracy: 0.3965
Epoch 24: val_accuracy did not improve from 0.46250
64/64 [==============================] - 7s 100ms/step - loss: 1.6832 - accuracy: 0.3965 - val_loss: 1.5592 - val_accuracy: 0.4000
Epoch 25/105
64/64 [==============================] - ETA: 0s - loss: 1.6748 - accuracy: 0.3750
Epoch 25: val_accuracy did not improve from 0.46250
64/64 [==============================] - 7s 102ms/step - loss: 1.6748 - accuracy: 0.3750 - val_loss: 1.5604 - val_accuracy: 0.4062
Epoch 26/105
64/64 [==============================] - ETA: 0s - loss: 1.6247 - accuracy: 0.4102
Epoch 26: val_accuracy did not improve from 0.46250
64/64 [==============================] - 7s 102ms/step - loss: 1.6247 - accuracy: 0.4102 - val_loss: 1.8119 - val_accuracy: 0.2625
Epoch 27/105
64/64 [==============================] - ETA: 0s - loss: 1.6456 - accuracy: 0.3799
Epoch 27: val_accuracy improved from 0.46250 to 0.48125, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_3.h5
64/64 [==============================] - 26s 403ms/step - loss: 1.6456 - accuracy: 0.3799 - val_loss: 1.4013 - val_accuracy: 0.4812
Epoch 28/105
64/64 [==============================] - ETA: 0s - loss: 1.6754 - accuracy: 0.3682
Epoch 28: val_accuracy did not improve from 0.48125
64/64 [==============================] - 7s 101ms/step - loss: 1.6754 - accuracy: 0.3682 - val_loss: 1.4540 - val_accuracy: 0.4625
Epoch 29/105
64/64 [==============================] - ETA: 0s - loss: 1.7488 - accuracy: 0.3691
Epoch 29: val_accuracy did not improve from 0.48125
64/64 [==============================] - 7s 103ms/step - loss: 1.7488 - accuracy: 0.3691 - val_loss: 1.7092 - val_accuracy: 0.3375
Epoch 30/105
64/64 [==============================] - ETA: 0s - loss: 1.6817 - accuracy: 0.3770
Epoch 30: val_accuracy improved from 0.48125 to 0.51875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_3.h5
64/64 [==============================] - 29s 463ms/step - loss: 1.6817 - accuracy: 0.3770 - val_loss: 1.3814 - val_accuracy: 0.5188
Epoch 31/105
64/64 [==============================] - ETA: 0s - loss: 1.6031 - accuracy: 0.4004
Epoch 31: val_accuracy did not improve from 0.51875
64/64 [==============================] - 7s 101ms/step - loss: 1.6031 - accuracy: 0.4004 - val_loss: 1.5201 - val_accuracy: 0.4625
Epoch 32/105
64/64 [==============================] - ETA: 0s - loss: 1.6238 - accuracy: 0.4189
Epoch 32: val_accuracy did not improve from 0.51875
64/64 [==============================] - 7s 102ms/step - loss: 1.6238 - accuracy: 0.4189 - val_loss: 1.4361 - val_accuracy: 0.4688
Epoch 33/105
64/64 [==============================] - ETA: 0s - loss: 1.5343 - accuracy: 0.4248
Epoch 33: val_accuracy did not improve from 0.51875
64/64 [==============================] - 7s 102ms/step - loss: 1.5343 - accuracy: 0.4248 - val_loss: 1.4479 - val_accuracy: 0.4750
Epoch 34/105
64/64 [==============================] - ETA: 0s - loss: 1.6213 - accuracy: 0.4004
Epoch 34: val_accuracy did not improve from 0.51875
64/64 [==============================] - 7s 102ms/step - loss: 1.6213 - accuracy: 0.4004 - val_loss: 1.3622 - val_accuracy: 0.5063
Epoch 35/105
64/64 [==============================] - ETA: 0s - loss: 1.5877 - accuracy: 0.4248
Epoch 35: val_accuracy did not improve from 0.51875
64/64 [==============================] - 7s 103ms/step - loss: 1.5877 - accuracy: 0.4248 - val_loss: 1.5495 - val_accuracy: 0.4688
Epoch 36/105
64/64 [==============================] - ETA: 0s - loss: 1.6159 - accuracy: 0.4199
Epoch 36: val_accuracy did not improve from 0.51875
64/64 [==============================] - 7s 102ms/step - loss: 1.6159 - accuracy: 0.4199 - val_loss: 1.6466 - val_accuracy: 0.4688
Epoch 37/105
64/64 [==============================] - ETA: 0s - loss: 1.6216 - accuracy: 0.3916
Epoch 37: val_accuracy did not improve from 0.51875
64/64 [==============================] - 7s 102ms/step - loss: 1.6216 - accuracy: 0.3916 - val_loss: 1.3399 - val_accuracy: 0.5000
Epoch 38/105
64/64 [==============================] - ETA: 0s - loss: 1.5433 - accuracy: 0.4473
Epoch 38: val_accuracy did not improve from 0.51875
64/64 [==============================] - 7s 103ms/step - loss: 1.5433 - accuracy: 0.4473 - val_loss: 1.3587 - val_accuracy: 0.4938
Epoch 39/105
64/64 [==============================] - ETA: 0s - loss: 1.6331 - accuracy: 0.4014
Epoch 39: val_accuracy did not improve from 0.51875
64/64 [==============================] - 7s 102ms/step - loss: 1.6331 - accuracy: 0.4014 - val_loss: 1.3785 - val_accuracy: 0.4625
Epoch 40/105
64/64 [==============================] - ETA: 0s - loss: 1.5819 - accuracy: 0.4189
Epoch 40: val_accuracy did not improve from 0.51875
64/64 [==============================] - 7s 102ms/step - loss: 1.5819 - accuracy: 0.4189 - val_loss: 1.3693 - val_accuracy: 0.5188
Epoch 41/105
64/64 [==============================] - ETA: 0s - loss: 1.5481 - accuracy: 0.4258
Epoch 41: val_accuracy did not improve from 0.51875
64/64 [==============================] - 7s 101ms/step - loss: 1.5481 - accuracy: 0.4258 - val_loss: 1.5886 - val_accuracy: 0.3938
Epoch 42/105
64/64 [==============================] - ETA: 0s - loss: 1.6224 - accuracy: 0.4248
Epoch 42: val_accuracy did not improve from 0.51875
64/64 [==============================] - 7s 103ms/step - loss: 1.6224 - accuracy: 0.4248 - val_loss: 1.4134 - val_accuracy: 0.5125
Epoch 43/105
64/64 [==============================] - ETA: 0s - loss: 1.6149 - accuracy: 0.4111
Epoch 43: val_accuracy did not improve from 0.51875
64/64 [==============================] - 7s 103ms/step - loss: 1.6149 - accuracy: 0.4111 - val_loss: 1.3886 - val_accuracy: 0.4563
Epoch 44/105
64/64 [==============================] - ETA: 0s - loss: 1.5371 - accuracy: 0.4365
Epoch 44: val_accuracy did not improve from 0.51875
64/64 [==============================] - 7s 103ms/step - loss: 1.5371 - accuracy: 0.4365 - val_loss: 1.3166 - val_accuracy: 0.5000
Epoch 45/105
64/64 [==============================] - ETA: 0s - loss: 1.5782 - accuracy: 0.4326
Epoch 45: val_accuracy improved from 0.51875 to 0.53125, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_3.h5
64/64 [==============================] - 28s 447ms/step - loss: 1.5782 - accuracy: 0.4326 - val_loss: 1.3194 - val_accuracy: 0.5312
Epoch 46/105
64/64 [==============================] - ETA: 0s - loss: 1.5697 - accuracy: 0.4473
Epoch 46: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 101ms/step - loss: 1.5697 - accuracy: 0.4473 - val_loss: 1.3254 - val_accuracy: 0.4875
Epoch 47/105
64/64 [==============================] - ETA: 0s - loss: 1.5565 - accuracy: 0.4336
Epoch 47: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 104ms/step - loss: 1.5565 - accuracy: 0.4336 - val_loss: 1.3525 - val_accuracy: 0.5063
Epoch 48/105
64/64 [==============================] - ETA: 0s - loss: 1.5202 - accuracy: 0.4365
Epoch 48: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 102ms/step - loss: 1.5202 - accuracy: 0.4365 - val_loss: 1.6206 - val_accuracy: 0.3875
Epoch 49/105
64/64 [==============================] - ETA: 0s - loss: 1.5129 - accuracy: 0.4619
Epoch 49: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 103ms/step - loss: 1.5129 - accuracy: 0.4619 - val_loss: 1.3256 - val_accuracy: 0.4938
Epoch 50/105
64/64 [==============================] - ETA: 0s - loss: 1.5595 - accuracy: 0.4307
Epoch 50: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 102ms/step - loss: 1.5595 - accuracy: 0.4307 - val_loss: 1.3212 - val_accuracy: 0.4812
Epoch 51/105
64/64 [==============================] - ETA: 0s - loss: 1.6031 - accuracy: 0.4297
Epoch 51: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 103ms/step - loss: 1.6031 - accuracy: 0.4297 - val_loss: 1.3493 - val_accuracy: 0.5063
Epoch 52/105
64/64 [==============================] - ETA: 0s - loss: 1.5333 - accuracy: 0.4512
Epoch 52: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 102ms/step - loss: 1.5333 - accuracy: 0.4512 - val_loss: 1.3619 - val_accuracy: 0.5125
Epoch 53/105
64/64 [==============================] - ETA: 0s - loss: 1.5518 - accuracy: 0.4521
Epoch 53: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 103ms/step - loss: 1.5518 - accuracy: 0.4521 - val_loss: 1.4650 - val_accuracy: 0.4750
Epoch 54/105
64/64 [==============================] - ETA: 0s - loss: 1.5379 - accuracy: 0.4434
Epoch 54: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 102ms/step - loss: 1.5379 - accuracy: 0.4434 - val_loss: 1.6789 - val_accuracy: 0.4250
Epoch 55/105
64/64 [==============================] - ETA: 0s - loss: 1.5692 - accuracy: 0.4121
Epoch 55: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 103ms/step - loss: 1.5692 - accuracy: 0.4121 - val_loss: 1.4892 - val_accuracy: 0.4375
Epoch 56/105
64/64 [==============================] - ETA: 0s - loss: 1.4995 - accuracy: 0.4600
Epoch 56: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 102ms/step - loss: 1.4995 - accuracy: 0.4600 - val_loss: 1.3126 - val_accuracy: 0.5312
Epoch 57/105
64/64 [==============================] - ETA: 0s - loss: 1.5580 - accuracy: 0.4443
Epoch 57: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 103ms/step - loss: 1.5580 - accuracy: 0.4443 - val_loss: 1.7278 - val_accuracy: 0.3938
Epoch 58/105
64/64 [==============================] - ETA: 0s - loss: 1.5308 - accuracy: 0.4375
Epoch 58: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 102ms/step - loss: 1.5308 - accuracy: 0.4375 - val_loss: 1.3994 - val_accuracy: 0.4437
Epoch 59/105
64/64 [==============================] - ETA: 0s - loss: 1.5601 - accuracy: 0.4277
Epoch 59: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 103ms/step - loss: 1.5601 - accuracy: 0.4277 - val_loss: 1.4291 - val_accuracy: 0.4437
Epoch 60/105
64/64 [==============================] - ETA: 0s - loss: 1.5991 - accuracy: 0.4248
Epoch 60: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 102ms/step - loss: 1.5991 - accuracy: 0.4248 - val_loss: 1.4041 - val_accuracy: 0.4688
Epoch 61/105
64/64 [==============================] - ETA: 0s - loss: 1.5132 - accuracy: 0.4385
Epoch 61: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 103ms/step - loss: 1.5132 - accuracy: 0.4385 - val_loss: 1.3830 - val_accuracy: 0.4437
Epoch 62/105
64/64 [==============================] - ETA: 0s - loss: 1.5690 - accuracy: 0.4346
Epoch 62: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 102ms/step - loss: 1.5690 - accuracy: 0.4346 - val_loss: 1.3565 - val_accuracy: 0.4875
Epoch 63/105
64/64 [==============================] - ETA: 0s - loss: 1.5529 - accuracy: 0.4336
Epoch 63: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 103ms/step - loss: 1.5529 - accuracy: 0.4336 - val_loss: 1.2563 - val_accuracy: 0.4938
Epoch 64/105
64/64 [==============================] - ETA: 0s - loss: 1.5526 - accuracy: 0.4395
Epoch 64: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 102ms/step - loss: 1.5526 - accuracy: 0.4395 - val_loss: 1.4361 - val_accuracy: 0.4250
Epoch 65/105
64/64 [==============================] - ETA: 0s - loss: 1.5329 - accuracy: 0.4336
Epoch 65: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 103ms/step - loss: 1.5329 - accuracy: 0.4336 - val_loss: 1.4495 - val_accuracy: 0.4563
Epoch 66/105
64/64 [==============================] - ETA: 0s - loss: 1.4661 - accuracy: 0.4766
Epoch 66: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 102ms/step - loss: 1.4661 - accuracy: 0.4766 - val_loss: 1.4157 - val_accuracy: 0.4625
Epoch 67/105
64/64 [==============================] - ETA: 0s - loss: 1.5186 - accuracy: 0.4609
Epoch 67: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 103ms/step - loss: 1.5186 - accuracy: 0.4609 - val_loss: 1.2997 - val_accuracy: 0.5188
Epoch 68/105
64/64 [==============================] - ETA: 0s - loss: 1.5101 - accuracy: 0.4746
Epoch 68: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 102ms/step - loss: 1.5101 - accuracy: 0.4746 - val_loss: 1.3724 - val_accuracy: 0.4875
Epoch 69/105
64/64 [==============================] - ETA: 0s - loss: 1.5192 - accuracy: 0.4561
Epoch 69: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 103ms/step - loss: 1.5192 - accuracy: 0.4561 - val_loss: 1.5172 - val_accuracy: 0.4000
Epoch 70/105
64/64 [==============================] - ETA: 0s - loss: 1.6428 - accuracy: 0.4229
Epoch 70: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 102ms/step - loss: 1.6428 - accuracy: 0.4229 - val_loss: 1.6652 - val_accuracy: 0.3688
Epoch 71/105
64/64 [==============================] - ETA: 0s - loss: 1.5165 - accuracy: 0.4531
Epoch 71: val_accuracy did not improve from 0.53125
64/64 [==============================] - 7s 103ms/step - loss: 1.5165 - accuracy: 0.4531 - val_loss: 1.3466 - val_accuracy: 0.5125
Epoch 72/105
64/64 [==============================] - ETA: 0s - loss: 1.4882 - accuracy: 0.4766
Epoch 72: val_accuracy improved from 0.53125 to 0.53750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_3.h5
64/64 [==============================] - 28s 437ms/step - loss: 1.4882 - accuracy: 0.4766 - val_loss: 1.2840 - val_accuracy: 0.5375
Epoch 73/105
64/64 [==============================] - ETA: 0s - loss: 1.4849 - accuracy: 0.5029
Epoch 73: val_accuracy did not improve from 0.53750
64/64 [==============================] - 7s 100ms/step - loss: 1.4849 - accuracy: 0.5029 - val_loss: 1.4033 - val_accuracy: 0.4938
Epoch 74/105
64/64 [==============================] - ETA: 0s - loss: 1.7128 - accuracy: 0.3916
Epoch 74: val_accuracy did not improve from 0.53750
64/64 [==============================] - 7s 102ms/step - loss: 1.7128 - accuracy: 0.3916 - val_loss: 1.3646 - val_accuracy: 0.4875
Epoch 75/105
64/64 [==============================] - ETA: 0s - loss: 1.4944 - accuracy: 0.4590
Epoch 75: val_accuracy did not improve from 0.53750
64/64 [==============================] - 6s 101ms/step - loss: 1.4944 - accuracy: 0.4590 - val_loss: 1.5519 - val_accuracy: 0.4187
Epoch 76/105
64/64 [==============================] - ETA: 0s - loss: 1.5283 - accuracy: 0.4463
Epoch 76: val_accuracy did not improve from 0.53750
64/64 [==============================] - 7s 101ms/step - loss: 1.5283 - accuracy: 0.4463 - val_loss: 1.4079 - val_accuracy: 0.4625
Epoch 77/105
64/64 [==============================] - ETA: 0s - loss: 1.5194 - accuracy: 0.4316
Epoch 77: val_accuracy improved from 0.53750 to 0.54375, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_3.h5
64/64 [==============================] - 26s 407ms/step - loss: 1.5194 - accuracy: 0.4316 - val_loss: 1.2427 - val_accuracy: 0.5437
Epoch 78/105
64/64 [==============================] - ETA: 0s - loss: 1.4621 - accuracy: 0.4590
Epoch 78: val_accuracy improved from 0.54375 to 0.56250, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_3.h5
64/64 [==============================] - 29s 456ms/step - loss: 1.4621 - accuracy: 0.4590 - val_loss: 1.2224 - val_accuracy: 0.5625
Epoch 79/105
64/64 [==============================] - ETA: 0s - loss: 1.5039 - accuracy: 0.4385
Epoch 79: val_accuracy did not improve from 0.56250
64/64 [==============================] - 7s 100ms/step - loss: 1.5039 - accuracy: 0.4385 - val_loss: 1.4229 - val_accuracy: 0.4938
Epoch 80/105
64/64 [==============================] - ETA: 0s - loss: 1.4822 - accuracy: 0.4639
Epoch 80: val_accuracy did not improve from 0.56250
64/64 [==============================] - 7s 102ms/step - loss: 1.4822 - accuracy: 0.4639 - val_loss: 1.3038 - val_accuracy: 0.4437
Epoch 81/105
64/64 [==============================] - ETA: 0s - loss: 1.4883 - accuracy: 0.4736
Epoch 81: val_accuracy did not improve from 0.56250
64/64 [==============================] - 6s 101ms/step - loss: 1.4883 - accuracy: 0.4736 - val_loss: 1.3777 - val_accuracy: 0.4750
Epoch 82/105
64/64 [==============================] - ETA: 0s - loss: 1.4597 - accuracy: 0.4746
Epoch 82: val_accuracy did not improve from 0.56250
64/64 [==============================] - 6s 101ms/step - loss: 1.4597 - accuracy: 0.4746 - val_loss: 1.5361 - val_accuracy: 0.4563
Epoch 83/105
64/64 [==============================] - ETA: 0s - loss: 1.4880 - accuracy: 0.4541
Epoch 83: val_accuracy did not improve from 0.56250
64/64 [==============================] - 7s 102ms/step - loss: 1.4880 - accuracy: 0.4541 - val_loss: 1.2326 - val_accuracy: 0.5625
Epoch 84/105
64/64 [==============================] - ETA: 0s - loss: 1.4388 - accuracy: 0.4717
Epoch 84: val_accuracy did not improve from 0.56250
64/64 [==============================] - 6s 101ms/step - loss: 1.4388 - accuracy: 0.4717 - val_loss: 1.3539 - val_accuracy: 0.5000
Epoch 85/105
64/64 [==============================] - ETA: 0s - loss: 1.4020 - accuracy: 0.4971
Epoch 85: val_accuracy did not improve from 0.56250
64/64 [==============================] - 7s 102ms/step - loss: 1.4020 - accuracy: 0.4971 - val_loss: 1.4432 - val_accuracy: 0.4313
Epoch 86/105
64/64 [==============================] - ETA: 0s - loss: 1.4528 - accuracy: 0.4824
Epoch 86: val_accuracy did not improve from 0.56250
64/64 [==============================] - 7s 101ms/step - loss: 1.4528 - accuracy: 0.4824 - val_loss: 1.2468 - val_accuracy: 0.5500
Epoch 87/105
64/64 [==============================] - ETA: 0s - loss: 1.5384 - accuracy: 0.4326
Epoch 87: val_accuracy did not improve from 0.56250
64/64 [==============================] - 7s 102ms/step - loss: 1.5384 - accuracy: 0.4326 - val_loss: 1.7168 - val_accuracy: 0.3938
Epoch 88/105
64/64 [==============================] - ETA: 0s - loss: 1.4315 - accuracy: 0.4961
Epoch 88: val_accuracy improved from 0.56250 to 0.59375, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_3.h5
64/64 [==============================] - 30s 474ms/step - loss: 1.4315 - accuracy: 0.4961 - val_loss: 1.1803 - val_accuracy: 0.5938
Epoch 89/105
64/64 [==============================] - ETA: 0s - loss: 1.4566 - accuracy: 0.4932
Epoch 89: val_accuracy did not improve from 0.59375
64/64 [==============================] - 7s 102ms/step - loss: 1.4566 - accuracy: 0.4932 - val_loss: 1.2681 - val_accuracy: 0.5562
Epoch 90/105
64/64 [==============================] - ETA: 0s - loss: 1.4521 - accuracy: 0.4775
Epoch 90: val_accuracy did not improve from 0.59375
64/64 [==============================] - 7s 102ms/step - loss: 1.4521 - accuracy: 0.4775 - val_loss: 1.1674 - val_accuracy: 0.5625
Epoch 91/105
64/64 [==============================] - ETA: 0s - loss: 1.4657 - accuracy: 0.4697
Epoch 91: val_accuracy did not improve from 0.59375
64/64 [==============================] - 7s 104ms/step - loss: 1.4657 - accuracy: 0.4697 - val_loss: 1.4733 - val_accuracy: 0.4500
Epoch 92/105
64/64 [==============================] - ETA: 0s - loss: 1.4633 - accuracy: 0.4883
Epoch 92: val_accuracy did not improve from 0.59375
64/64 [==============================] - 7s 103ms/step - loss: 1.4633 - accuracy: 0.4883 - val_loss: 1.3237 - val_accuracy: 0.4938
Epoch 93/105
64/64 [==============================] - ETA: 0s - loss: 1.4024 - accuracy: 0.5039
Epoch 93: val_accuracy did not improve from 0.59375
64/64 [==============================] - 7s 103ms/step - loss: 1.4024 - accuracy: 0.5039 - val_loss: 1.3462 - val_accuracy: 0.4625
Epoch 94/105
64/64 [==============================] - ETA: 0s - loss: 1.4779 - accuracy: 0.4434
Epoch 94: val_accuracy did not improve from 0.59375
64/64 [==============================] - 7s 102ms/step - loss: 1.4779 - accuracy: 0.4434 - val_loss: 1.3641 - val_accuracy: 0.5750
Epoch 95/105
64/64 [==============================] - ETA: 0s - loss: 1.4365 - accuracy: 0.4863
Epoch 95: val_accuracy did not improve from 0.59375
64/64 [==============================] - 7s 102ms/step - loss: 1.4365 - accuracy: 0.4863 - val_loss: 1.3506 - val_accuracy: 0.4875
Epoch 96/105
64/64 [==============================] - ETA: 0s - loss: 1.4714 - accuracy: 0.4717
Epoch 96: val_accuracy did not improve from 0.59375
64/64 [==============================] - 7s 103ms/step - loss: 1.4714 - accuracy: 0.4717 - val_loss: 1.5053 - val_accuracy: 0.4938
Epoch 97/105
64/64 [==============================] - ETA: 0s - loss: 1.4196 - accuracy: 0.4844
Epoch 97: val_accuracy did not improve from 0.59375
64/64 [==============================] - 7s 102ms/step - loss: 1.4196 - accuracy: 0.4844 - val_loss: 1.2417 - val_accuracy: 0.5437
Epoch 98/105
64/64 [==============================] - ETA: 0s - loss: 1.5400 - accuracy: 0.4736
Epoch 98: val_accuracy did not improve from 0.59375
64/64 [==============================] - 7s 103ms/step - loss: 1.5400 - accuracy: 0.4736 - val_loss: 1.2565 - val_accuracy: 0.5250
Epoch 99/105
64/64 [==============================] - ETA: 0s - loss: 1.4239 - accuracy: 0.5029
Epoch 99: val_accuracy did not improve from 0.59375
64/64 [==============================] - 7s 102ms/step - loss: 1.4239 - accuracy: 0.5029 - val_loss: 1.1840 - val_accuracy: 0.5688
Epoch 100/105
64/64 [==============================] - ETA: 0s - loss: 1.4348 - accuracy: 0.4873
Epoch 100: val_accuracy did not improve from 0.59375
64/64 [==============================] - 7s 103ms/step - loss: 1.4348 - accuracy: 0.4873 - val_loss: 1.1736 - val_accuracy: 0.5813
Epoch 101/105
64/64 [==============================] - ETA: 0s - loss: 1.5563 - accuracy: 0.4570
Epoch 101: val_accuracy did not improve from 0.59375
64/64 [==============================] - 7s 103ms/step - loss: 1.5563 - accuracy: 0.4570 - val_loss: 1.2303 - val_accuracy: 0.5375
Epoch 102/105
64/64 [==============================] - ETA: 0s - loss: 1.4324 - accuracy: 0.5068
Epoch 102: val_accuracy did not improve from 0.59375
64/64 [==============================] - 7s 104ms/step - loss: 1.4324 - accuracy: 0.5068 - val_loss: 1.1423 - val_accuracy: 0.5688
Epoch 103/105
64/64 [==============================] - ETA: 0s - loss: 1.4602 - accuracy: 0.4727
Epoch 103: val_accuracy did not improve from 0.59375
64/64 [==============================] - 7s 103ms/step - loss: 1.4602 - accuracy: 0.4727 - val_loss: 1.1249 - val_accuracy: 0.5875
Epoch 104/105
64/64 [==============================] - ETA: 0s - loss: 1.4334 - accuracy: 0.4863
Epoch 104: val_accuracy did not improve from 0.59375
64/64 [==============================] - 7s 103ms/step - loss: 1.4334 - accuracy: 0.4863 - val_loss: 1.2156 - val_accuracy: 0.5500
Epoch 105/105
64/64 [==============================] - ETA: 0s - loss: 1.4069 - accuracy: 0.5244
Epoch 105: val_accuracy did not improve from 0.59375
64/64 [==============================] - 7s 102ms/step - loss: 1.4069 - accuracy: 0.5244 - val_loss: 1.3548 - val_accuracy: 0.4875
********* Training time: 1146.234375 s.
*****************
* Model Summary *
*****************
Model: "model_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_3 (InputLayer) [(None, 224, 224, 3)] 0
block1_conv1 (Conv2D) (None, 224, 224, 64) 1792
block1_conv2 (Conv2D) (None, 224, 224, 64) 36928
block1_pool (MaxPooling2D) (None, 112, 112, 64) 0
block2_conv1 (Conv2D) (None, 112, 112, 128) 73856
block2_conv2 (Conv2D) (None, 112, 112, 128) 147584
block2_pool (MaxPooling2D) (None, 56, 56, 128) 0
block3_conv1 (Conv2D) (None, 56, 56, 256) 295168
block3_conv2 (Conv2D) (None, 56, 56, 256) 590080
block3_conv3 (Conv2D) (None, 56, 56, 256) 590080
block3_pool (MaxPooling2D) (None, 28, 28, 256) 0
block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160
block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808
block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808
block4_pool (MaxPooling2D) (None, 14, 14, 512) 0
block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808
block5_pool (MaxPooling2D) (None, 7, 7, 512) 0
flatten (Flatten) (None, 25088) 0
fc1 (Dense) (None, 4096) 102764544
fc2 (Dense) (None, 4096) 16781312
dense_2 (Dense) (None, 12) 49164
=================================================================
Total params: 134,309,708
Trainable params: 119,595,020
Non-trainable params: 14,714,688
_________________________________________________________________
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 3600 validated image filenames belonging to 12 classes.
225/225 [==============================] - 6s 25ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.8929 0.5357 0.6696 280
anarrhichomenum 0.4729 0.9028 0.6207 319
brevantherum 0.5339 0.4719 0.5010 267
dulcamara 0.2262 0.0603 0.0952 315
herposolanum 0.4834 0.6300 0.5470 300
holophylla 0.4184 0.6507 0.5093 335
lasiocarpa 0.9339 0.6861 0.7910 309
melongena 0.8099 0.3439 0.4828 285
micracantha 0.3604 0.3776 0.3688 294
petota 0.2807 0.4485 0.3453 301
solanum 0.3456 0.4080 0.3742 299
torva 0.2871 0.0980 0.1461 296
accuracy 0.4714 3600
macro avg 0.5038 0.4678 0.4543 3600
weighted avg 0.4994 0.4714 0.4538 3600
********************
* Confusion Matrix *
********************
************************************** * Train/Val Accuracy and Loss graphs * **************************************
***************************
* Started at 3531.6875... *
***************************
Found 14400 validated image filenames belonging to 12 classes.
Found 3600 validated image filenames belonging to 12 classes.
Epoch 1/105
64/64 [==============================] - ETA: 0s - loss: 14.9412 - accuracy: 0.1270
Epoch 1: val_accuracy improved from -inf to 0.12500, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_4.h5
64/64 [==============================] - 23s 360ms/step - loss: 14.9412 - accuracy: 0.1270 - val_loss: 3.7976 - val_accuracy: 0.1250
Epoch 2/105
64/64 [==============================] - ETA: 0s - loss: 2.2030 - accuracy: 0.1904
Epoch 2: val_accuracy improved from 0.12500 to 0.21875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_4.h5
64/64 [==============================] - 31s 494ms/step - loss: 2.2030 - accuracy: 0.1904 - val_loss: 1.9452 - val_accuracy: 0.2188
Epoch 3/105
64/64 [==============================] - ETA: 0s - loss: 1.9537 - accuracy: 0.2549
Epoch 3: val_accuracy improved from 0.21875 to 0.23750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_4.h5
64/64 [==============================] - 24s 379ms/step - loss: 1.9537 - accuracy: 0.2549 - val_loss: 2.1034 - val_accuracy: 0.2375
Epoch 4/105
64/64 [==============================] - ETA: 0s - loss: 1.9347 - accuracy: 0.2793
Epoch 4: val_accuracy improved from 0.23750 to 0.30000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_4.h5
64/64 [==============================] - 33s 515ms/step - loss: 1.9347 - accuracy: 0.2793 - val_loss: 1.8453 - val_accuracy: 0.3000
Epoch 5/105
64/64 [==============================] - ETA: 0s - loss: 1.9421 - accuracy: 0.3066
Epoch 5: val_accuracy did not improve from 0.30000
64/64 [==============================] - 7s 99ms/step - loss: 1.9421 - accuracy: 0.3066 - val_loss: 1.8907 - val_accuracy: 0.2438
Epoch 6/105
64/64 [==============================] - ETA: 0s - loss: 1.8948 - accuracy: 0.2939
Epoch 6: val_accuracy improved from 0.30000 to 0.35000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_4.h5
64/64 [==============================] - 33s 525ms/step - loss: 1.8948 - accuracy: 0.2939 - val_loss: 1.8866 - val_accuracy: 0.3500
Epoch 7/105
64/64 [==============================] - ETA: 0s - loss: 1.8345 - accuracy: 0.3145
Epoch 7: val_accuracy improved from 0.35000 to 0.39375, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_4.h5
64/64 [==============================] - 24s 371ms/step - loss: 1.8345 - accuracy: 0.3145 - val_loss: 1.7512 - val_accuracy: 0.3938
Epoch 8/105
64/64 [==============================] - ETA: 0s - loss: 1.8595 - accuracy: 0.3105
Epoch 8: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 100ms/step - loss: 1.8595 - accuracy: 0.3105 - val_loss: 2.0200 - val_accuracy: 0.2688
Epoch 9/105
64/64 [==============================] - ETA: 0s - loss: 1.9035 - accuracy: 0.2764
Epoch 9: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 102ms/step - loss: 1.9035 - accuracy: 0.2764 - val_loss: 1.7339 - val_accuracy: 0.3250
Epoch 10/105
64/64 [==============================] - ETA: 0s - loss: 1.8584 - accuracy: 0.2979
Epoch 10: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 101ms/step - loss: 1.8584 - accuracy: 0.2979 - val_loss: 1.8556 - val_accuracy: 0.2812
Epoch 11/105
64/64 [==============================] - ETA: 0s - loss: 1.8014 - accuracy: 0.3193
Epoch 11: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 102ms/step - loss: 1.8014 - accuracy: 0.3193 - val_loss: 1.7100 - val_accuracy: 0.3688
Epoch 12/105
64/64 [==============================] - ETA: 0s - loss: 1.8038 - accuracy: 0.3125
Epoch 12: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 103ms/step - loss: 1.8038 - accuracy: 0.3125 - val_loss: 1.6928 - val_accuracy: 0.3562
Epoch 13/105
64/64 [==============================] - ETA: 0s - loss: 1.8258 - accuracy: 0.3066
Epoch 13: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 102ms/step - loss: 1.8258 - accuracy: 0.3066 - val_loss: 1.7335 - val_accuracy: 0.3625
Epoch 14/105
64/64 [==============================] - ETA: 0s - loss: 1.8078 - accuracy: 0.3262
Epoch 14: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 102ms/step - loss: 1.8078 - accuracy: 0.3262 - val_loss: 1.8149 - val_accuracy: 0.3125
Epoch 15/105
64/64 [==============================] - ETA: 0s - loss: 1.7854 - accuracy: 0.3389
Epoch 15: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 103ms/step - loss: 1.7854 - accuracy: 0.3389 - val_loss: 1.6382 - val_accuracy: 0.3750
Epoch 16/105
64/64 [==============================] - ETA: 0s - loss: 1.7081 - accuracy: 0.3584
Epoch 16: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 102ms/step - loss: 1.7081 - accuracy: 0.3584 - val_loss: 1.7223 - val_accuracy: 0.3500
Epoch 17/105
64/64 [==============================] - ETA: 0s - loss: 1.8104 - accuracy: 0.2812
Epoch 17: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 102ms/step - loss: 1.8104 - accuracy: 0.2812 - val_loss: 1.7064 - val_accuracy: 0.3812
Epoch 18/105
64/64 [==============================] - ETA: 0s - loss: 1.7065 - accuracy: 0.3652
Epoch 18: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 102ms/step - loss: 1.7065 - accuracy: 0.3652 - val_loss: 1.9505 - val_accuracy: 0.2937
Epoch 19/105
64/64 [==============================] - ETA: 0s - loss: 1.7679 - accuracy: 0.3271
Epoch 19: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 102ms/step - loss: 1.7679 - accuracy: 0.3271 - val_loss: 1.7534 - val_accuracy: 0.3750
Epoch 20/105
64/64 [==============================] - ETA: 0s - loss: 1.7466 - accuracy: 0.3516
Epoch 20: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 103ms/step - loss: 1.7466 - accuracy: 0.3516 - val_loss: 1.8553 - val_accuracy: 0.2812
Epoch 21/105
64/64 [==============================] - ETA: 0s - loss: 1.7133 - accuracy: 0.3379
Epoch 21: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 104ms/step - loss: 1.7133 - accuracy: 0.3379 - val_loss: 1.8979 - val_accuracy: 0.3000
Epoch 22/105
64/64 [==============================] - ETA: 0s - loss: 1.7385 - accuracy: 0.3525
Epoch 22: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 103ms/step - loss: 1.7385 - accuracy: 0.3525 - val_loss: 1.7378 - val_accuracy: 0.3938
Epoch 23/105
64/64 [==============================] - ETA: 0s - loss: 1.6840 - accuracy: 0.3672
Epoch 23: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 102ms/step - loss: 1.6840 - accuracy: 0.3672 - val_loss: 1.6105 - val_accuracy: 0.3812
Epoch 24/105
64/64 [==============================] - ETA: 0s - loss: 1.6164 - accuracy: 0.4023
Epoch 24: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 103ms/step - loss: 1.6164 - accuracy: 0.4023 - val_loss: 1.7361 - val_accuracy: 0.3562
Epoch 25/105
64/64 [==============================] - ETA: 0s - loss: 1.6394 - accuracy: 0.4092
Epoch 25: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 102ms/step - loss: 1.6394 - accuracy: 0.4092 - val_loss: 1.7402 - val_accuracy: 0.3375
Epoch 26/105
64/64 [==============================] - ETA: 0s - loss: 1.6748 - accuracy: 0.4004
Epoch 26: val_accuracy improved from 0.39375 to 0.42500, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_4.h5
64/64 [==============================] - 31s 489ms/step - loss: 1.6748 - accuracy: 0.4004 - val_loss: 1.5788 - val_accuracy: 0.4250
Epoch 27/105
64/64 [==============================] - ETA: 0s - loss: 1.6914 - accuracy: 0.3818
Epoch 27: val_accuracy did not improve from 0.42500
64/64 [==============================] - 7s 100ms/step - loss: 1.6914 - accuracy: 0.3818 - val_loss: 1.5882 - val_accuracy: 0.3812
Epoch 28/105
64/64 [==============================] - ETA: 0s - loss: 1.6692 - accuracy: 0.3926
Epoch 28: val_accuracy did not improve from 0.42500
64/64 [==============================] - 7s 102ms/step - loss: 1.6692 - accuracy: 0.3926 - val_loss: 1.8413 - val_accuracy: 0.3688
Epoch 29/105
64/64 [==============================] - ETA: 0s - loss: 1.6796 - accuracy: 0.3984
Epoch 29: val_accuracy did not improve from 0.42500
64/64 [==============================] - 7s 103ms/step - loss: 1.6796 - accuracy: 0.3984 - val_loss: 1.8061 - val_accuracy: 0.3250
Epoch 30/105
64/64 [==============================] - ETA: 0s - loss: 1.6838 - accuracy: 0.3926
Epoch 30: val_accuracy did not improve from 0.42500
64/64 [==============================] - 7s 102ms/step - loss: 1.6838 - accuracy: 0.3926 - val_loss: 1.7603 - val_accuracy: 0.3438
Epoch 31/105
64/64 [==============================] - ETA: 0s - loss: 1.6427 - accuracy: 0.3936
Epoch 31: val_accuracy did not improve from 0.42500
64/64 [==============================] - 7s 103ms/step - loss: 1.6427 - accuracy: 0.3936 - val_loss: 1.9952 - val_accuracy: 0.3000
Epoch 32/105
64/64 [==============================] - ETA: 0s - loss: 1.6561 - accuracy: 0.4023
Epoch 32: val_accuracy did not improve from 0.42500
64/64 [==============================] - 7s 103ms/step - loss: 1.6561 - accuracy: 0.4023 - val_loss: 1.6409 - val_accuracy: 0.3750
Epoch 33/105
64/64 [==============================] - ETA: 0s - loss: 1.6020 - accuracy: 0.4121
Epoch 33: val_accuracy did not improve from 0.42500
64/64 [==============================] - 7s 103ms/step - loss: 1.6020 - accuracy: 0.4121 - val_loss: 1.7092 - val_accuracy: 0.3562
Epoch 34/105
64/64 [==============================] - ETA: 0s - loss: 1.6179 - accuracy: 0.3945
Epoch 34: val_accuracy did not improve from 0.42500
64/64 [==============================] - 6s 101ms/step - loss: 1.6179 - accuracy: 0.3945 - val_loss: 1.7288 - val_accuracy: 0.3875
Epoch 35/105
64/64 [==============================] - ETA: 0s - loss: 1.5634 - accuracy: 0.4160
Epoch 35: val_accuracy improved from 0.42500 to 0.43750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_4.h5
64/64 [==============================] - 26s 407ms/step - loss: 1.5634 - accuracy: 0.4160 - val_loss: 1.6478 - val_accuracy: 0.4375
Epoch 36/105
64/64 [==============================] - ETA: 0s - loss: 1.5896 - accuracy: 0.4160
Epoch 36: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 101ms/step - loss: 1.5896 - accuracy: 0.4160 - val_loss: 1.6849 - val_accuracy: 0.4375
Epoch 37/105
64/64 [==============================] - ETA: 0s - loss: 1.5916 - accuracy: 0.4111
Epoch 37: val_accuracy did not improve from 0.43750
64/64 [==============================] - 6s 101ms/step - loss: 1.5916 - accuracy: 0.4111 - val_loss: 1.8327 - val_accuracy: 0.3688
Epoch 38/105
64/64 [==============================] - ETA: 0s - loss: 1.5660 - accuracy: 0.4424
Epoch 38: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 102ms/step - loss: 1.5660 - accuracy: 0.4424 - val_loss: 1.5364 - val_accuracy: 0.4375
Epoch 39/105
64/64 [==============================] - ETA: 0s - loss: 1.6446 - accuracy: 0.4004
Epoch 39: val_accuracy did not improve from 0.43750
64/64 [==============================] - 6s 101ms/step - loss: 1.6446 - accuracy: 0.4004 - val_loss: 1.8757 - val_accuracy: 0.3438
Epoch 40/105
64/64 [==============================] - ETA: 0s - loss: 1.6103 - accuracy: 0.4072
Epoch 40: val_accuracy did not improve from 0.43750
64/64 [==============================] - 6s 101ms/step - loss: 1.6103 - accuracy: 0.4072 - val_loss: 1.6443 - val_accuracy: 0.4062
Epoch 41/105
64/64 [==============================] - ETA: 0s - loss: 1.6014 - accuracy: 0.4424
Epoch 41: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 101ms/step - loss: 1.6014 - accuracy: 0.4424 - val_loss: 1.7201 - val_accuracy: 0.3688
Epoch 42/105
64/64 [==============================] - ETA: 0s - loss: 1.6151 - accuracy: 0.4180
Epoch 42: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 102ms/step - loss: 1.6151 - accuracy: 0.4180 - val_loss: 1.7790 - val_accuracy: 0.3750
Epoch 43/105
64/64 [==============================] - ETA: 0s - loss: 1.5743 - accuracy: 0.4082
Epoch 43: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 101ms/step - loss: 1.5743 - accuracy: 0.4082 - val_loss: 1.6526 - val_accuracy: 0.3938
Epoch 44/105
64/64 [==============================] - ETA: 0s - loss: 1.5588 - accuracy: 0.4131
Epoch 44: val_accuracy improved from 0.43750 to 0.47500, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_4.h5
64/64 [==============================] - 31s 485ms/step - loss: 1.5588 - accuracy: 0.4131 - val_loss: 1.4713 - val_accuracy: 0.4750
Epoch 45/105
64/64 [==============================] - ETA: 0s - loss: 1.5304 - accuracy: 0.4326
Epoch 45: val_accuracy did not improve from 0.47500
64/64 [==============================] - 7s 100ms/step - loss: 1.5304 - accuracy: 0.4326 - val_loss: 1.7106 - val_accuracy: 0.4125
Epoch 46/105
64/64 [==============================] - ETA: 0s - loss: 1.5035 - accuracy: 0.4570
Epoch 46: val_accuracy did not improve from 0.47500
64/64 [==============================] - 7s 102ms/step - loss: 1.5035 - accuracy: 0.4570 - val_loss: 1.5575 - val_accuracy: 0.4250
Epoch 47/105
64/64 [==============================] - ETA: 0s - loss: 1.5700 - accuracy: 0.4404
Epoch 47: val_accuracy did not improve from 0.47500
64/64 [==============================] - 7s 103ms/step - loss: 1.5700 - accuracy: 0.4404 - val_loss: 1.8036 - val_accuracy: 0.4187
Epoch 48/105
64/64 [==============================] - ETA: 0s - loss: 1.5844 - accuracy: 0.4463
Epoch 48: val_accuracy did not improve from 0.47500
64/64 [==============================] - 7s 101ms/step - loss: 1.5844 - accuracy: 0.4463 - val_loss: 1.9296 - val_accuracy: 0.3625
Epoch 49/105
64/64 [==============================] - ETA: 0s - loss: 1.4822 - accuracy: 0.4893
Epoch 49: val_accuracy did not improve from 0.47500
64/64 [==============================] - 7s 103ms/step - loss: 1.4822 - accuracy: 0.4893 - val_loss: 1.5667 - val_accuracy: 0.4625
Epoch 50/105
64/64 [==============================] - ETA: 0s - loss: 1.5249 - accuracy: 0.4443
Epoch 50: val_accuracy did not improve from 0.47500
64/64 [==============================] - 7s 102ms/step - loss: 1.5249 - accuracy: 0.4443 - val_loss: 1.5451 - val_accuracy: 0.4437
Epoch 51/105
64/64 [==============================] - ETA: 0s - loss: 1.5694 - accuracy: 0.4541
Epoch 51: val_accuracy did not improve from 0.47500
64/64 [==============================] - 7s 103ms/step - loss: 1.5694 - accuracy: 0.4541 - val_loss: 1.5625 - val_accuracy: 0.4313
Epoch 52/105
64/64 [==============================] - ETA: 0s - loss: 1.4735 - accuracy: 0.4580
Epoch 52: val_accuracy did not improve from 0.47500
64/64 [==============================] - 7s 102ms/step - loss: 1.4735 - accuracy: 0.4580 - val_loss: 1.6499 - val_accuracy: 0.4062
Epoch 53/105
64/64 [==============================] - ETA: 0s - loss: 1.4722 - accuracy: 0.4678
Epoch 53: val_accuracy did not improve from 0.47500
64/64 [==============================] - 7s 103ms/step - loss: 1.4722 - accuracy: 0.4678 - val_loss: 1.6872 - val_accuracy: 0.4250
Epoch 54/105
64/64 [==============================] - ETA: 0s - loss: 1.5548 - accuracy: 0.4453
Epoch 54: val_accuracy did not improve from 0.47500
64/64 [==============================] - 7s 102ms/step - loss: 1.5548 - accuracy: 0.4453 - val_loss: 1.7391 - val_accuracy: 0.4062
Epoch 55/105
64/64 [==============================] - ETA: 0s - loss: 1.5761 - accuracy: 0.4336
Epoch 55: val_accuracy did not improve from 0.47500
64/64 [==============================] - 7s 102ms/step - loss: 1.5761 - accuracy: 0.4336 - val_loss: 1.5048 - val_accuracy: 0.4437
Epoch 56/105
64/64 [==============================] - ETA: 0s - loss: 1.5338 - accuracy: 0.4395
Epoch 56: val_accuracy did not improve from 0.47500
64/64 [==============================] - 7s 102ms/step - loss: 1.5338 - accuracy: 0.4395 - val_loss: 1.4263 - val_accuracy: 0.4563
Epoch 57/105
64/64 [==============================] - ETA: 0s - loss: 1.5591 - accuracy: 0.4492
Epoch 57: val_accuracy did not improve from 0.47500
64/64 [==============================] - 7s 103ms/step - loss: 1.5591 - accuracy: 0.4492 - val_loss: 1.5784 - val_accuracy: 0.4688
Epoch 58/105
64/64 [==============================] - ETA: 0s - loss: 1.4914 - accuracy: 0.4834
Epoch 58: val_accuracy improved from 0.47500 to 0.50625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_4.h5
64/64 [==============================] - 27s 432ms/step - loss: 1.4914 - accuracy: 0.4834 - val_loss: 1.3772 - val_accuracy: 0.5063
Epoch 59/105
64/64 [==============================] - ETA: 0s - loss: 1.5538 - accuracy: 0.4365
Epoch 59: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 101ms/step - loss: 1.5538 - accuracy: 0.4365 - val_loss: 1.5898 - val_accuracy: 0.4375
Epoch 60/105
64/64 [==============================] - ETA: 0s - loss: 1.5323 - accuracy: 0.4707
Epoch 60: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 103ms/step - loss: 1.5323 - accuracy: 0.4707 - val_loss: 1.5324 - val_accuracy: 0.4750
Epoch 61/105
64/64 [==============================] - ETA: 0s - loss: 1.5163 - accuracy: 0.4746
Epoch 61: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 102ms/step - loss: 1.5163 - accuracy: 0.4746 - val_loss: 1.9624 - val_accuracy: 0.3125
Epoch 62/105
64/64 [==============================] - ETA: 0s - loss: 1.5434 - accuracy: 0.4541
Epoch 62: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 103ms/step - loss: 1.5434 - accuracy: 0.4541 - val_loss: 1.5093 - val_accuracy: 0.4313
Epoch 63/105
64/64 [==============================] - ETA: 0s - loss: 1.5212 - accuracy: 0.4512
Epoch 63: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 103ms/step - loss: 1.5212 - accuracy: 0.4512 - val_loss: 1.5850 - val_accuracy: 0.3938
Epoch 64/105
64/64 [==============================] - ETA: 0s - loss: 1.5042 - accuracy: 0.4590
Epoch 64: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 103ms/step - loss: 1.5042 - accuracy: 0.4590 - val_loss: 1.7761 - val_accuracy: 0.4062
Epoch 65/105
64/64 [==============================] - ETA: 0s - loss: 1.5283 - accuracy: 0.4629
Epoch 65: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 103ms/step - loss: 1.5283 - accuracy: 0.4629 - val_loss: 1.6532 - val_accuracy: 0.3688
Epoch 66/105
64/64 [==============================] - ETA: 0s - loss: 1.5118 - accuracy: 0.4648
Epoch 66: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 103ms/step - loss: 1.5118 - accuracy: 0.4648 - val_loss: 1.5611 - val_accuracy: 0.4750
Epoch 67/105
64/64 [==============================] - ETA: 0s - loss: 1.5653 - accuracy: 0.4541
Epoch 67: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 103ms/step - loss: 1.5653 - accuracy: 0.4541 - val_loss: 1.6316 - val_accuracy: 0.4187
Epoch 68/105
64/64 [==============================] - ETA: 0s - loss: 1.5038 - accuracy: 0.4531
Epoch 68: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 102ms/step - loss: 1.5038 - accuracy: 0.4531 - val_loss: 1.6310 - val_accuracy: 0.4250
Epoch 69/105
64/64 [==============================] - ETA: 0s - loss: 1.5273 - accuracy: 0.4395
Epoch 69: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 101ms/step - loss: 1.5273 - accuracy: 0.4395 - val_loss: 1.4613 - val_accuracy: 0.4563
Epoch 70/105
64/64 [==============================] - ETA: 0s - loss: 1.4453 - accuracy: 0.4746
Epoch 70: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 103ms/step - loss: 1.4453 - accuracy: 0.4746 - val_loss: 1.5851 - val_accuracy: 0.4938
Epoch 71/105
64/64 [==============================] - ETA: 0s - loss: 1.5070 - accuracy: 0.4541
Epoch 71: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 103ms/step - loss: 1.5070 - accuracy: 0.4541 - val_loss: 1.5161 - val_accuracy: 0.4563
Epoch 72/105
64/64 [==============================] - ETA: 0s - loss: 1.5056 - accuracy: 0.4463
Epoch 72: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 103ms/step - loss: 1.5056 - accuracy: 0.4463 - val_loss: 1.8675 - val_accuracy: 0.3438
Epoch 73/105
64/64 [==============================] - ETA: 0s - loss: 1.4868 - accuracy: 0.4590
Epoch 73: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 102ms/step - loss: 1.4868 - accuracy: 0.4590 - val_loss: 1.5697 - val_accuracy: 0.4375
Epoch 74/105
64/64 [==============================] - ETA: 0s - loss: 1.4765 - accuracy: 0.4736
Epoch 74: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 102ms/step - loss: 1.4765 - accuracy: 0.4736 - val_loss: 1.4136 - val_accuracy: 0.4563
Epoch 75/105
64/64 [==============================] - ETA: 0s - loss: 1.4216 - accuracy: 0.4932
Epoch 75: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 103ms/step - loss: 1.4216 - accuracy: 0.4932 - val_loss: 1.5219 - val_accuracy: 0.4625
Epoch 76/105
64/64 [==============================] - ETA: 0s - loss: 1.4208 - accuracy: 0.4941
Epoch 76: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 102ms/step - loss: 1.4208 - accuracy: 0.4941 - val_loss: 1.6485 - val_accuracy: 0.3750
Epoch 77/105
64/64 [==============================] - ETA: 0s - loss: 1.4914 - accuracy: 0.4746
Epoch 77: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 102ms/step - loss: 1.4914 - accuracy: 0.4746 - val_loss: 1.4900 - val_accuracy: 0.4563
Epoch 78/105
64/64 [==============================] - ETA: 0s - loss: 1.5254 - accuracy: 0.4365
Epoch 78: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 103ms/step - loss: 1.5254 - accuracy: 0.4365 - val_loss: 1.5956 - val_accuracy: 0.4625
Epoch 79/105
64/64 [==============================] - ETA: 0s - loss: 1.4961 - accuracy: 0.4570
Epoch 79: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 102ms/step - loss: 1.4961 - accuracy: 0.4570 - val_loss: 1.5447 - val_accuracy: 0.4563
Epoch 80/105
64/64 [==============================] - ETA: 0s - loss: 1.4784 - accuracy: 0.4766
Epoch 80: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 103ms/step - loss: 1.4784 - accuracy: 0.4766 - val_loss: 1.5455 - val_accuracy: 0.4750
Epoch 81/105
64/64 [==============================] - ETA: 0s - loss: 1.4465 - accuracy: 0.4736
Epoch 81: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 102ms/step - loss: 1.4465 - accuracy: 0.4736 - val_loss: 1.9084 - val_accuracy: 0.3812
Epoch 82/105
64/64 [==============================] - ETA: 0s - loss: 1.4637 - accuracy: 0.4834
Epoch 82: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 102ms/step - loss: 1.4637 - accuracy: 0.4834 - val_loss: 1.9312 - val_accuracy: 0.3875
Epoch 83/105
64/64 [==============================] - ETA: 0s - loss: 1.4420 - accuracy: 0.4863
Epoch 83: val_accuracy did not improve from 0.50625
64/64 [==============================] - 7s 103ms/step - loss: 1.4420 - accuracy: 0.4863 - val_loss: 1.5690 - val_accuracy: 0.4375
Epoch 84/105
64/64 [==============================] - ETA: 0s - loss: 1.4658 - accuracy: 0.4785
Epoch 84: val_accuracy improved from 0.50625 to 0.54375, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16_tl_20230217091009_4.h5
64/64 [==============================] - 32s 508ms/step - loss: 1.4658 - accuracy: 0.4785 - val_loss: 1.3468 - val_accuracy: 0.5437
Epoch 85/105
64/64 [==============================] - ETA: 0s - loss: 1.4885 - accuracy: 0.4453
Epoch 85: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 101ms/step - loss: 1.4885 - accuracy: 0.4453 - val_loss: 1.5861 - val_accuracy: 0.5000
Epoch 86/105
64/64 [==============================] - ETA: 0s - loss: 1.4045 - accuracy: 0.4785
Epoch 86: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 102ms/step - loss: 1.4045 - accuracy: 0.4785 - val_loss: 1.4179 - val_accuracy: 0.5125
Epoch 87/105
64/64 [==============================] - ETA: 0s - loss: 1.4069 - accuracy: 0.4932
Epoch 87: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 102ms/step - loss: 1.4069 - accuracy: 0.4932 - val_loss: 1.5567 - val_accuracy: 0.4625
Epoch 88/105
64/64 [==============================] - ETA: 0s - loss: 1.4485 - accuracy: 0.4834
Epoch 88: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 102ms/step - loss: 1.4485 - accuracy: 0.4834 - val_loss: 1.6090 - val_accuracy: 0.4875
Epoch 89/105
64/64 [==============================] - ETA: 0s - loss: 1.4744 - accuracy: 0.4746
Epoch 89: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 104ms/step - loss: 1.4744 - accuracy: 0.4746 - val_loss: 1.6301 - val_accuracy: 0.4313
Epoch 90/105
64/64 [==============================] - ETA: 0s - loss: 1.4212 - accuracy: 0.5039
Epoch 90: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 102ms/step - loss: 1.4212 - accuracy: 0.5039 - val_loss: 1.7306 - val_accuracy: 0.4500
Epoch 91/105
64/64 [==============================] - ETA: 0s - loss: 1.4667 - accuracy: 0.4756
Epoch 91: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 103ms/step - loss: 1.4667 - accuracy: 0.4756 - val_loss: 1.6038 - val_accuracy: 0.4688
Epoch 92/105
64/64 [==============================] - ETA: 0s - loss: 1.4245 - accuracy: 0.5059
Epoch 92: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 102ms/step - loss: 1.4245 - accuracy: 0.5059 - val_loss: 1.9219 - val_accuracy: 0.3500
Epoch 93/105
64/64 [==============================] - ETA: 0s - loss: 1.4790 - accuracy: 0.4658
Epoch 93: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 103ms/step - loss: 1.4790 - accuracy: 0.4658 - val_loss: 1.5015 - val_accuracy: 0.4688
Epoch 94/105
64/64 [==============================] - ETA: 0s - loss: 1.4809 - accuracy: 0.4980
Epoch 94: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 103ms/step - loss: 1.4809 - accuracy: 0.4980 - val_loss: 1.4786 - val_accuracy: 0.5000
Epoch 95/105
64/64 [==============================] - ETA: 0s - loss: 1.4275 - accuracy: 0.4844
Epoch 95: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 102ms/step - loss: 1.4275 - accuracy: 0.4844 - val_loss: 1.4574 - val_accuracy: 0.5000
Epoch 96/105
64/64 [==============================] - ETA: 0s - loss: 1.4023 - accuracy: 0.5059
Epoch 96: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 103ms/step - loss: 1.4023 - accuracy: 0.5059 - val_loss: 1.6669 - val_accuracy: 0.4437
Epoch 97/105
64/64 [==============================] - ETA: 0s - loss: 1.4498 - accuracy: 0.4756
Epoch 97: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 102ms/step - loss: 1.4498 - accuracy: 0.4756 - val_loss: 1.3901 - val_accuracy: 0.5125
Epoch 98/105
64/64 [==============================] - ETA: 0s - loss: 1.4808 - accuracy: 0.4668
Epoch 98: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 102ms/step - loss: 1.4808 - accuracy: 0.4668 - val_loss: 1.5109 - val_accuracy: 0.4500
Epoch 99/105
64/64 [==============================] - ETA: 0s - loss: 1.4471 - accuracy: 0.4775
Epoch 99: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 103ms/step - loss: 1.4471 - accuracy: 0.4775 - val_loss: 1.5076 - val_accuracy: 0.4688
Epoch 100/105
64/64 [==============================] - ETA: 0s - loss: 1.4420 - accuracy: 0.5029
Epoch 100: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 102ms/step - loss: 1.4420 - accuracy: 0.5029 - val_loss: 1.4942 - val_accuracy: 0.4875
Epoch 101/105
64/64 [==============================] - ETA: 0s - loss: 1.4988 - accuracy: 0.4766
Epoch 101: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 103ms/step - loss: 1.4988 - accuracy: 0.4766 - val_loss: 1.5585 - val_accuracy: 0.4688
Epoch 102/105
64/64 [==============================] - ETA: 0s - loss: 1.4229 - accuracy: 0.4736
Epoch 102: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 102ms/step - loss: 1.4229 - accuracy: 0.4736 - val_loss: 1.5906 - val_accuracy: 0.4625
Epoch 103/105
64/64 [==============================] - ETA: 0s - loss: 1.4151 - accuracy: 0.4961
Epoch 103: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 103ms/step - loss: 1.4151 - accuracy: 0.4961 - val_loss: 1.5588 - val_accuracy: 0.4375
Epoch 104/105
64/64 [==============================] - ETA: 0s - loss: 1.4594 - accuracy: 0.5039
Epoch 104: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 102ms/step - loss: 1.4594 - accuracy: 0.5039 - val_loss: 1.4123 - val_accuracy: 0.4750
Epoch 105/105
64/64 [==============================] - ETA: 0s - loss: 1.3569 - accuracy: 0.5195
Epoch 105: val_accuracy did not improve from 0.54375
64/64 [==============================] - 7s 103ms/step - loss: 1.3569 - accuracy: 0.5195 - val_loss: 1.4070 - val_accuracy: 0.5437
********* Training time: 1141.921875 s.
*****************
* Model Summary *
*****************
Model: "model_3"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_4 (InputLayer) [(None, 224, 224, 3)] 0
block1_conv1 (Conv2D) (None, 224, 224, 64) 1792
block1_conv2 (Conv2D) (None, 224, 224, 64) 36928
block1_pool (MaxPooling2D) (None, 112, 112, 64) 0
block2_conv1 (Conv2D) (None, 112, 112, 128) 73856
block2_conv2 (Conv2D) (None, 112, 112, 128) 147584
block2_pool (MaxPooling2D) (None, 56, 56, 128) 0
block3_conv1 (Conv2D) (None, 56, 56, 256) 295168
block3_conv2 (Conv2D) (None, 56, 56, 256) 590080
block3_conv3 (Conv2D) (None, 56, 56, 256) 590080
block3_pool (MaxPooling2D) (None, 28, 28, 256) 0
block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160
block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808
block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808
block4_pool (MaxPooling2D) (None, 14, 14, 512) 0
block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808
block5_pool (MaxPooling2D) (None, 7, 7, 512) 0
flatten (Flatten) (None, 25088) 0
fc1 (Dense) (None, 4096) 102764544
fc2 (Dense) (None, 4096) 16781312
dense_3 (Dense) (None, 12) 49164
=================================================================
Total params: 134,309,708
Trainable params: 119,595,020
Non-trainable params: 14,714,688
_________________________________________________________________
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 3600 validated image filenames belonging to 12 classes.
225/225 [==============================] - 6s 25ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.6557 0.8399 0.7364 331
anarrhichomenum 0.6368 0.4671 0.5389 319
brevantherum 0.6616 0.4199 0.5137 312
dulcamara 0.3075 0.3822 0.3408 259
herposolanum 0.5431 0.4615 0.4990 273
holophylla 0.3770 0.6646 0.4811 316
lasiocarpa 0.9944 0.5701 0.7247 314
melongena 0.8281 0.1755 0.2896 302
micracantha 0.3710 0.4063 0.3879 315
petota 0.3967 0.6644 0.4968 289
solanum 0.3516 0.5904 0.4408 271
torva 0.3714 0.1304 0.1931 299
accuracy 0.4844 3600
macro avg 0.5412 0.4810 0.4702 3600
weighted avg 0.5483 0.4844 0.4751 3600
********************
* Confusion Matrix *
********************
**************************************
* Train/Val Accuracy and Loss graphs *
**************************************
*******************************
* Mean metrics across 4 folds *
*******************************
| 0 | |
|---|---|
| accuracy | 0.475972 |
| acanthophora.precision | 0.767927 |
| acanthophora.recall | 0.724700 |
| acanthophora.f1-score | 0.713952 |
| acanthophora.support | 298.250000 |
| anarrhichomenum.precision | 0.595728 |
| anarrhichomenum.recall | 0.706986 |
| anarrhichomenum.f1-score | 0.602344 |
| anarrhichomenum.support | 318.750000 |
| brevantherum.precision | 0.571459 |
| brevantherum.recall | 0.358761 |
| brevantherum.f1-score | 0.425595 |
| brevantherum.support | 288.000000 |
| dulcamara.precision | 0.324495 |
| dulcamara.recall | 0.193336 |
| dulcamara.f1-score | 0.220247 |
| dulcamara.support | 295.500000 |
| herposolanum.precision | 0.434159 |
| herposolanum.recall | 0.580904 |
| herposolanum.f1-score | 0.479326 |
| herposolanum.support | 285.000000 |
| holophylla.precision | 0.432474 |
| holophylla.recall | 0.567413 |
| holophylla.f1-score | 0.482154 |
| holophylla.support | 304.750000 |
| lasiocarpa.precision | 0.904777 |
| lasiocarpa.recall | 0.719096 |
| lasiocarpa.f1-score | 0.788687 |
| lasiocarpa.support | 312.500000 |
| melongena.precision | 0.808505 |
| melongena.recall | 0.324550 |
| melongena.f1-score | 0.429940 |
| melongena.support | 298.000000 |
| micracantha.precision | 0.448051 |
| micracantha.recall | 0.380374 |
| micracantha.f1-score | 0.405778 |
| micracantha.support | 300.000000 |
| petota.precision | 0.341838 |
| petota.recall | 0.632698 |
| petota.f1-score | 0.426575 |
| petota.support | 304.500000 |
| solanum.precision | 0.331441 |
| solanum.recall | 0.390233 |
| solanum.f1-score | 0.347896 |
| solanum.support | 294.500000 |
| torva.precision | 0.426752 |
| torva.recall | 0.090926 |
| torva.f1-score | 0.144653 |
| torva.support | 300.250000 |
| macro avg.precision | 0.532301 |
| macro avg.recall | 0.472498 |
| macro avg.f1-score | 0.455596 |
| macro avg.support | 3600.000000 |
| weighted avg.precision | 0.533449 |
| weighted avg.recall | 0.475972 |
| weighted avg.f1-score | 0.458047 |
| weighted avg.support | 3600.000000 |
CPU times: total: 1h 17min 53s
Wall time: 1h 7min 28s
%%time
kf = ShuffleSplit(n_splits=CV_FOLDS,
test_size=VAL_SIZE,
random_state=RANDOM_SEED)
split = 1
resnet50_cv_val_pred = []
for train_index, val_index in kf.split(balanced_training_data):
resnet50_model = tf.keras.applications.ResNet50(
pooling="avg",
include_top=True,
weights=None,
classes=NUM_CLASSES,
classifier_activation='softmax'
)
# Add an optimizer
resnet50_model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.003),
loss='categorical_crossentropy',
metrics=['accuracy'])
# resnet50_model.compile(optimizer=tf.optimizers.SGD(learning_rate=0.003, momentum=0.9),
# loss='categorical_crossentropy',
# metrics=['accuracy'])
training_split_data = balanced_training_data.iloc[train_index]
val_split_data = balanced_training_data.iloc[val_index]
# Add a progress bar and save checkpoints
resnet50_callbacks = [
create_model_checkpoint(os.path.join(DATA_ROOT_LOCATION, f"resnet50_{TRAINING_RUN_ID}_{split}.h5")),
tf.keras.callbacks.ProgbarLogger(
count_mode = 'steps',
stateful_metrics = None
),
tf.keras.callbacks.EarlyStopping(
monitor='val_accuracy',
min_delta=0,
patience=40,
verbose=1,
mode='auto',
restore_best_weights=True
)
]
fit_params = {
"x": training_split_data,
"epochs": 105,
"callbacks": resnet50_callbacks,
"validation_data": val_split_data,
"steps_per_epoch": 64,
"validation_steps": 10,
}
preproc_func = tf.keras.applications.resnet50.preprocess_input
with tf.device(TRAINING_DEVICE_NAME):
resnet50_training_history = train_model(resnet50_model,
fit_params=fit_params,
preproc_func=preproc_func)
_, pred_report = evaluate_model(resnet50_model,
resnet50_training_history,
fit_params=fit_params,
preproc_func=preproc_func)
resnet50_cv_val_pred.append(pred_report)
split += 1
calculate_cv_mean_metrics(resnet50_cv_val_pred)
**************************
* Started at 11.59375... *
**************************
Found 14400 validated image filenames belonging to 12 classes.
Found 3600 validated image filenames belonging to 12 classes.
Epoch 1/105
64/64 [==============================] - ETA: 0s - loss: 3.9523 - accuracy: 0.0850
Epoch 1: val_accuracy improved from -inf to 0.06875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_1.h5
64/64 [==============================] - 16s 141ms/step - loss: 3.9523 - accuracy: 0.0850 - val_loss: 3304.0222 - val_accuracy: 0.0688
Epoch 2/105
64/64 [==============================] - ETA: 0s - loss: 2.5358 - accuracy: 0.1260
Epoch 2: val_accuracy improved from 0.06875 to 0.13750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_1.h5
64/64 [==============================] - 13s 200ms/step - loss: 2.5358 - accuracy: 0.1260 - val_loss: 13.0022 - val_accuracy: 0.1375
Epoch 3/105
64/64 [==============================] - ETA: 0s - loss: 2.6443 - accuracy: 0.1436
Epoch 3: val_accuracy did not improve from 0.13750
64/64 [==============================] - 13s 196ms/step - loss: 2.6443 - accuracy: 0.1436 - val_loss: 361.7343 - val_accuracy: 0.0625
Epoch 4/105
64/64 [==============================] - ETA: 0s - loss: 2.4638 - accuracy: 0.1494
Epoch 4: val_accuracy did not improve from 0.13750
64/64 [==============================] - 8s 129ms/step - loss: 2.4638 - accuracy: 0.1494 - val_loss: 2.4240 - val_accuracy: 0.1063
Epoch 5/105
64/64 [==============================] - ETA: 0s - loss: 2.3624 - accuracy: 0.1768
Epoch 5: val_accuracy improved from 0.13750 to 0.15625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_1.h5
64/64 [==============================] - 10s 153ms/step - loss: 2.3624 - accuracy: 0.1768 - val_loss: 2.4630 - val_accuracy: 0.1562
Epoch 6/105
64/64 [==============================] - ETA: 0s - loss: 2.3109 - accuracy: 0.2041
Epoch 6: val_accuracy improved from 0.15625 to 0.20000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_1.h5
64/64 [==============================] - 11s 174ms/step - loss: 2.3109 - accuracy: 0.2041 - val_loss: 2.3138 - val_accuracy: 0.2000
Epoch 7/105
64/64 [==============================] - ETA: 0s - loss: 2.3030 - accuracy: 0.1895
Epoch 7: val_accuracy did not improve from 0.20000
64/64 [==============================] - 8s 131ms/step - loss: 2.3030 - accuracy: 0.1895 - val_loss: 25.8597 - val_accuracy: 0.1562
Epoch 8/105
64/64 [==============================] - ETA: 0s - loss: 2.2849 - accuracy: 0.1943
Epoch 8: val_accuracy did not improve from 0.20000
64/64 [==============================] - 8s 121ms/step - loss: 2.2849 - accuracy: 0.1943 - val_loss: 5.4640 - val_accuracy: 0.1375
Epoch 9/105
64/64 [==============================] - ETA: 0s - loss: 2.2538 - accuracy: 0.1895
Epoch 9: val_accuracy did not improve from 0.20000
64/64 [==============================] - 8s 121ms/step - loss: 2.2538 - accuracy: 0.1895 - val_loss: 5.8237 - val_accuracy: 0.1000
Epoch 10/105
64/64 [==============================] - ETA: 0s - loss: 2.1991 - accuracy: 0.2090
Epoch 10: val_accuracy improved from 0.20000 to 0.21875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_1.h5
64/64 [==============================] - 8s 131ms/step - loss: 2.1991 - accuracy: 0.2090 - val_loss: 2.6069 - val_accuracy: 0.2188
Epoch 11/105
64/64 [==============================] - ETA: 0s - loss: 2.2279 - accuracy: 0.2109
Epoch 11: val_accuracy did not improve from 0.21875
64/64 [==============================] - 9s 131ms/step - loss: 2.2279 - accuracy: 0.2109 - val_loss: 3.3335 - val_accuracy: 0.1875
Epoch 12/105
64/64 [==============================] - ETA: 0s - loss: 2.1907 - accuracy: 0.2412
Epoch 12: val_accuracy did not improve from 0.21875
64/64 [==============================] - 8s 117ms/step - loss: 2.1907 - accuracy: 0.2412 - val_loss: 4.5206 - val_accuracy: 0.1250
Epoch 13/105
64/64 [==============================] - ETA: 0s - loss: 2.1448 - accuracy: 0.2363
Epoch 13: val_accuracy improved from 0.21875 to 0.23750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_1.h5
64/64 [==============================] - 9s 142ms/step - loss: 2.1448 - accuracy: 0.2363 - val_loss: 2.2018 - val_accuracy: 0.2375
Epoch 14/105
64/64 [==============================] - ETA: 0s - loss: 2.1114 - accuracy: 0.2490
Epoch 14: val_accuracy did not improve from 0.23750
64/64 [==============================] - 8s 124ms/step - loss: 2.1114 - accuracy: 0.2490 - val_loss: 2.5284 - val_accuracy: 0.2313
Epoch 15/105
64/64 [==============================] - ETA: 0s - loss: 2.0689 - accuracy: 0.2598
Epoch 15: val_accuracy did not improve from 0.23750
64/64 [==============================] - 7s 115ms/step - loss: 2.0689 - accuracy: 0.2598 - val_loss: 3.0121 - val_accuracy: 0.1625
Epoch 16/105
64/64 [==============================] - ETA: 0s - loss: 2.0941 - accuracy: 0.2363
Epoch 16: val_accuracy did not improve from 0.23750
64/64 [==============================] - 7s 114ms/step - loss: 2.0941 - accuracy: 0.2363 - val_loss: 3.8181 - val_accuracy: 0.2000
Epoch 17/105
64/64 [==============================] - ETA: 0s - loss: 2.0380 - accuracy: 0.2607
Epoch 17: val_accuracy improved from 0.23750 to 0.25625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_1.h5
64/64 [==============================] - 9s 133ms/step - loss: 2.0380 - accuracy: 0.2607 - val_loss: 2.4819 - val_accuracy: 0.2562
Epoch 18/105
64/64 [==============================] - ETA: 0s - loss: 2.0299 - accuracy: 0.2666
Epoch 18: val_accuracy did not improve from 0.25625
64/64 [==============================] - 8s 117ms/step - loss: 2.0299 - accuracy: 0.2666 - val_loss: 2.3683 - val_accuracy: 0.2250
Epoch 19/105
64/64 [==============================] - ETA: 0s - loss: 2.0635 - accuracy: 0.2549
Epoch 19: val_accuracy did not improve from 0.25625
64/64 [==============================] - 7s 112ms/step - loss: 2.0635 - accuracy: 0.2549 - val_loss: 2.9197 - val_accuracy: 0.1562
Epoch 20/105
64/64 [==============================] - ETA: 0s - loss: 2.0713 - accuracy: 0.2607
Epoch 20: val_accuracy did not improve from 0.25625
64/64 [==============================] - 7s 113ms/step - loss: 2.0713 - accuracy: 0.2607 - val_loss: 7.3077 - val_accuracy: 0.1250
Epoch 21/105
64/64 [==============================] - ETA: 0s - loss: 2.0474 - accuracy: 0.2812
Epoch 21: val_accuracy did not improve from 0.25625
64/64 [==============================] - 7s 116ms/step - loss: 2.0474 - accuracy: 0.2812 - val_loss: 2.2546 - val_accuracy: 0.2125
Epoch 22/105
64/64 [==============================] - ETA: 0s - loss: 2.0342 - accuracy: 0.2695
Epoch 22: val_accuracy did not improve from 0.25625
64/64 [==============================] - 7s 114ms/step - loss: 2.0342 - accuracy: 0.2695 - val_loss: 10.2068 - val_accuracy: 0.1063
Epoch 23/105
64/64 [==============================] - ETA: 0s - loss: 2.0281 - accuracy: 0.2793
Epoch 23: val_accuracy did not improve from 0.25625
64/64 [==============================] - 7s 116ms/step - loss: 2.0281 - accuracy: 0.2793 - val_loss: 5.0967 - val_accuracy: 0.2062
Epoch 24/105
64/64 [==============================] - ETA: 0s - loss: 2.0272 - accuracy: 0.2744
Epoch 24: val_accuracy did not improve from 0.25625
64/64 [==============================] - 7s 114ms/step - loss: 2.0272 - accuracy: 0.2744 - val_loss: 3.9561 - val_accuracy: 0.1688
Epoch 25/105
64/64 [==============================] - ETA: 0s - loss: 1.9860 - accuracy: 0.2979
Epoch 25: val_accuracy did not improve from 0.25625
64/64 [==============================] - 7s 114ms/step - loss: 1.9860 - accuracy: 0.2979 - val_loss: 4.1490 - val_accuracy: 0.1625
Epoch 26/105
64/64 [==============================] - ETA: 0s - loss: 1.9542 - accuracy: 0.2979
Epoch 26: val_accuracy did not improve from 0.25625
64/64 [==============================] - 7s 114ms/step - loss: 1.9542 - accuracy: 0.2979 - val_loss: 3.5106 - val_accuracy: 0.2062
Epoch 27/105
64/64 [==============================] - ETA: 0s - loss: 2.0085 - accuracy: 0.2783
Epoch 27: val_accuracy did not improve from 0.25625
64/64 [==============================] - 7s 112ms/step - loss: 2.0085 - accuracy: 0.2783 - val_loss: 5.1532 - val_accuracy: 0.1625
Epoch 28/105
64/64 [==============================] - ETA: 0s - loss: 1.9019 - accuracy: 0.3096
Epoch 28: val_accuracy improved from 0.25625 to 0.30625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_1.h5
64/64 [==============================] - 9s 136ms/step - loss: 1.9019 - accuracy: 0.3096 - val_loss: 1.9293 - val_accuracy: 0.3063
Epoch 29/105
64/64 [==============================] - ETA: 0s - loss: 1.9803 - accuracy: 0.2939
Epoch 29: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 110ms/step - loss: 1.9803 - accuracy: 0.2939 - val_loss: 3.3186 - val_accuracy: 0.1500
Epoch 30/105
64/64 [==============================] - ETA: 0s - loss: 1.9498 - accuracy: 0.3105
Epoch 30: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 112ms/step - loss: 1.9498 - accuracy: 0.3105 - val_loss: 2.0330 - val_accuracy: 0.2625
Epoch 31/105
64/64 [==============================] - ETA: 0s - loss: 1.9434 - accuracy: 0.3008
Epoch 31: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 113ms/step - loss: 1.9434 - accuracy: 0.3008 - val_loss: 2.4606 - val_accuracy: 0.2562
Epoch 32/105
64/64 [==============================] - ETA: 0s - loss: 1.9491 - accuracy: 0.3154
Epoch 32: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 114ms/step - loss: 1.9491 - accuracy: 0.3154 - val_loss: 2.9831 - val_accuracy: 0.2500
Epoch 33/105
64/64 [==============================] - ETA: 0s - loss: 1.8720 - accuracy: 0.3066
Epoch 33: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 112ms/step - loss: 1.8720 - accuracy: 0.3066 - val_loss: 3.2116 - val_accuracy: 0.2625
Epoch 34/105
64/64 [==============================] - ETA: 0s - loss: 1.9014 - accuracy: 0.3330
Epoch 34: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 112ms/step - loss: 1.9014 - accuracy: 0.3330 - val_loss: 2.5069 - val_accuracy: 0.2062
Epoch 35/105
64/64 [==============================] - ETA: 0s - loss: 2.0149 - accuracy: 0.2832
Epoch 35: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 111ms/step - loss: 2.0149 - accuracy: 0.2832 - val_loss: 38.3172 - val_accuracy: 0.1562
Epoch 36/105
64/64 [==============================] - ETA: 0s - loss: 1.9780 - accuracy: 0.2988
Epoch 36: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 112ms/step - loss: 1.9780 - accuracy: 0.2988 - val_loss: 2.9295 - val_accuracy: 0.2000
Epoch 37/105
64/64 [==============================] - ETA: 0s - loss: 1.9236 - accuracy: 0.3076
Epoch 37: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 111ms/step - loss: 1.9236 - accuracy: 0.3076 - val_loss: 6.0876 - val_accuracy: 0.2313
Epoch 38/105
64/64 [==============================] - ETA: 0s - loss: 1.9806 - accuracy: 0.2773
Epoch 38: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 110ms/step - loss: 1.9806 - accuracy: 0.2773 - val_loss: 2.2363 - val_accuracy: 0.2250
Epoch 39/105
64/64 [==============================] - ETA: 0s - loss: 1.9538 - accuracy: 0.2979
Epoch 39: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 112ms/step - loss: 1.9538 - accuracy: 0.2979 - val_loss: 6.6918 - val_accuracy: 0.1312
Epoch 40/105
64/64 [==============================] - ETA: 0s - loss: 1.8993 - accuracy: 0.3076
Epoch 40: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 112ms/step - loss: 1.8993 - accuracy: 0.3076 - val_loss: 9.1859 - val_accuracy: 0.1312
Epoch 41/105
64/64 [==============================] - ETA: 0s - loss: 1.9046 - accuracy: 0.3184
Epoch 41: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 110ms/step - loss: 1.9046 - accuracy: 0.3184 - val_loss: 2.4718 - val_accuracy: 0.2688
Epoch 42/105
64/64 [==============================] - ETA: 0s - loss: 1.9266 - accuracy: 0.3232
Epoch 42: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 112ms/step - loss: 1.9266 - accuracy: 0.3232 - val_loss: 3.0511 - val_accuracy: 0.2562
Epoch 43/105
64/64 [==============================] - ETA: 0s - loss: 1.9161 - accuracy: 0.3174
Epoch 43: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 110ms/step - loss: 1.9161 - accuracy: 0.3174 - val_loss: 4.8519 - val_accuracy: 0.1875
Epoch 44/105
64/64 [==============================] - ETA: 0s - loss: 1.8713 - accuracy: 0.3359
Epoch 44: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 110ms/step - loss: 1.8713 - accuracy: 0.3359 - val_loss: 5.5246 - val_accuracy: 0.2062
Epoch 45/105
64/64 [==============================] - ETA: 0s - loss: 1.9069 - accuracy: 0.3281
Epoch 45: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 110ms/step - loss: 1.9069 - accuracy: 0.3281 - val_loss: 2.0925 - val_accuracy: 0.2937
Epoch 46/105
64/64 [==============================] - ETA: 0s - loss: 1.8806 - accuracy: 0.3389
Epoch 46: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 111ms/step - loss: 1.8806 - accuracy: 0.3389 - val_loss: 4.3142 - val_accuracy: 0.2500
Epoch 47/105
64/64 [==============================] - ETA: 0s - loss: 1.8739 - accuracy: 0.3311
Epoch 47: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 110ms/step - loss: 1.8739 - accuracy: 0.3311 - val_loss: 2.1895 - val_accuracy: 0.2688
Epoch 48/105
64/64 [==============================] - ETA: 0s - loss: 1.8454 - accuracy: 0.3301
Epoch 48: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 111ms/step - loss: 1.8454 - accuracy: 0.3301 - val_loss: 2.3883 - val_accuracy: 0.3063
Epoch 49/105
64/64 [==============================] - ETA: 0s - loss: 1.8319 - accuracy: 0.3496
Epoch 49: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 113ms/step - loss: 1.8319 - accuracy: 0.3496 - val_loss: 2.4354 - val_accuracy: 0.2562
Epoch 50/105
64/64 [==============================] - ETA: 0s - loss: 1.8577 - accuracy: 0.3477
Epoch 50: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 109ms/step - loss: 1.8577 - accuracy: 0.3477 - val_loss: 2.2787 - val_accuracy: 0.2875
Epoch 51/105
64/64 [==============================] - ETA: 0s - loss: 1.8529 - accuracy: 0.3613
Epoch 51: val_accuracy improved from 0.30625 to 0.33750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_1.h5
64/64 [==============================] - 8s 126ms/step - loss: 1.8529 - accuracy: 0.3613 - val_loss: 1.9150 - val_accuracy: 0.3375
Epoch 52/105
64/64 [==============================] - ETA: 0s - loss: 1.8803 - accuracy: 0.3379
Epoch 52: val_accuracy did not improve from 0.33750
64/64 [==============================] - 7s 110ms/step - loss: 1.8803 - accuracy: 0.3379 - val_loss: 2.0774 - val_accuracy: 0.2750
Epoch 53/105
64/64 [==============================] - ETA: 0s - loss: 1.8852 - accuracy: 0.3242
Epoch 53: val_accuracy improved from 0.33750 to 0.34375, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_1.h5
64/64 [==============================] - 8s 124ms/step - loss: 1.8852 - accuracy: 0.3242 - val_loss: 1.9119 - val_accuracy: 0.3438
Epoch 54/105
64/64 [==============================] - ETA: 0s - loss: 1.8527 - accuracy: 0.3438
Epoch 54: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 109ms/step - loss: 1.8527 - accuracy: 0.3438 - val_loss: 2.9542 - val_accuracy: 0.2500
Epoch 55/105
64/64 [==============================] - ETA: 0s - loss: 1.7882 - accuracy: 0.3428
Epoch 55: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 109ms/step - loss: 1.7882 - accuracy: 0.3428 - val_loss: 3.0582 - val_accuracy: 0.2625
Epoch 56/105
64/64 [==============================] - ETA: 0s - loss: 1.8778 - accuracy: 0.3350
Epoch 56: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 109ms/step - loss: 1.8778 - accuracy: 0.3350 - val_loss: 2.2699 - val_accuracy: 0.3000
Epoch 57/105
64/64 [==============================] - ETA: 0s - loss: 1.8786 - accuracy: 0.3154
Epoch 57: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 109ms/step - loss: 1.8786 - accuracy: 0.3154 - val_loss: 2.2414 - val_accuracy: 0.2875
Epoch 58/105
64/64 [==============================] - ETA: 0s - loss: 1.8158 - accuracy: 0.3311
Epoch 58: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 112ms/step - loss: 1.8158 - accuracy: 0.3311 - val_loss: 2.9257 - val_accuracy: 0.2438
Epoch 59/105
64/64 [==============================] - ETA: 0s - loss: 1.7976 - accuracy: 0.3457
Epoch 59: val_accuracy improved from 0.34375 to 0.40625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_1.h5
64/64 [==============================] - 8s 128ms/step - loss: 1.7976 - accuracy: 0.3457 - val_loss: 1.8131 - val_accuracy: 0.4062
Epoch 60/105
64/64 [==============================] - ETA: 0s - loss: 1.8129 - accuracy: 0.3428
Epoch 60: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 110ms/step - loss: 1.8129 - accuracy: 0.3428 - val_loss: 3.0188 - val_accuracy: 0.2750
Epoch 61/105
64/64 [==============================] - ETA: 0s - loss: 1.8813 - accuracy: 0.3369
Epoch 61: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 111ms/step - loss: 1.8813 - accuracy: 0.3369 - val_loss: 5.3609 - val_accuracy: 0.1937
Epoch 62/105
64/64 [==============================] - ETA: 0s - loss: 1.8767 - accuracy: 0.3311
Epoch 62: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 111ms/step - loss: 1.8767 - accuracy: 0.3311 - val_loss: 5.5975 - val_accuracy: 0.1437
Epoch 63/105
64/64 [==============================] - ETA: 0s - loss: 1.8553 - accuracy: 0.3506
Epoch 63: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 109ms/step - loss: 1.8553 - accuracy: 0.3506 - val_loss: 6.0375 - val_accuracy: 0.1750
Epoch 64/105
64/64 [==============================] - ETA: 0s - loss: 1.7814 - accuracy: 0.3770
Epoch 64: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 110ms/step - loss: 1.7814 - accuracy: 0.3770 - val_loss: 2.0218 - val_accuracy: 0.3562
Epoch 65/105
64/64 [==============================] - ETA: 0s - loss: 1.8135 - accuracy: 0.3682
Epoch 65: val_accuracy improved from 0.40625 to 0.41250, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_1.h5
64/64 [==============================] - 9s 134ms/step - loss: 1.8135 - accuracy: 0.3682 - val_loss: 2.0649 - val_accuracy: 0.4125
Epoch 66/105
64/64 [==============================] - ETA: 0s - loss: 1.7086 - accuracy: 0.3896
Epoch 66: val_accuracy did not improve from 0.41250
64/64 [==============================] - 7s 111ms/step - loss: 1.7086 - accuracy: 0.3896 - val_loss: 2.2259 - val_accuracy: 0.4000
Epoch 67/105
64/64 [==============================] - ETA: 0s - loss: 1.7595 - accuracy: 0.3662
Epoch 67: val_accuracy improved from 0.41250 to 0.43750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_1.h5
64/64 [==============================] - 9s 142ms/step - loss: 1.7595 - accuracy: 0.3662 - val_loss: 1.8778 - val_accuracy: 0.4375
Epoch 68/105
64/64 [==============================] - ETA: 0s - loss: 1.7371 - accuracy: 0.3877
Epoch 68: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 112ms/step - loss: 1.7371 - accuracy: 0.3877 - val_loss: 1.8521 - val_accuracy: 0.4000
Epoch 69/105
64/64 [==============================] - ETA: 0s - loss: 1.7651 - accuracy: 0.3672
Epoch 69: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 112ms/step - loss: 1.7651 - accuracy: 0.3672 - val_loss: 1.8758 - val_accuracy: 0.3313
Epoch 70/105
64/64 [==============================] - ETA: 0s - loss: 1.7485 - accuracy: 0.3770
Epoch 70: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 112ms/step - loss: 1.7485 - accuracy: 0.3770 - val_loss: 2.0379 - val_accuracy: 0.3313
Epoch 71/105
64/64 [==============================] - ETA: 0s - loss: 1.7851 - accuracy: 0.3877
Epoch 71: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 112ms/step - loss: 1.7851 - accuracy: 0.3877 - val_loss: 6.0165 - val_accuracy: 0.2062
Epoch 72/105
64/64 [==============================] - ETA: 0s - loss: 1.7607 - accuracy: 0.3838
Epoch 72: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 111ms/step - loss: 1.7607 - accuracy: 0.3838 - val_loss: 2.1400 - val_accuracy: 0.3375
Epoch 73/105
64/64 [==============================] - ETA: 0s - loss: 1.7333 - accuracy: 0.3545
Epoch 73: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 113ms/step - loss: 1.7333 - accuracy: 0.3545 - val_loss: 1.7137 - val_accuracy: 0.3688
Epoch 74/105
64/64 [==============================] - ETA: 0s - loss: 1.7759 - accuracy: 0.3838
Epoch 74: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 115ms/step - loss: 1.7759 - accuracy: 0.3838 - val_loss: 2.3283 - val_accuracy: 0.3375
Epoch 75/105
64/64 [==============================] - ETA: 0s - loss: 1.7496 - accuracy: 0.3828
Epoch 75: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 111ms/step - loss: 1.7496 - accuracy: 0.3828 - val_loss: 1.7013 - val_accuracy: 0.4062
Epoch 76/105
64/64 [==============================] - ETA: 0s - loss: 1.7037 - accuracy: 0.4336
Epoch 76: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 111ms/step - loss: 1.7037 - accuracy: 0.4336 - val_loss: 2.0851 - val_accuracy: 0.3438
Epoch 77/105
64/64 [==============================] - ETA: 0s - loss: 1.7343 - accuracy: 0.3936
Epoch 77: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 113ms/step - loss: 1.7343 - accuracy: 0.3936 - val_loss: 2.0351 - val_accuracy: 0.3063
Epoch 78/105
64/64 [==============================] - ETA: 0s - loss: 1.7121 - accuracy: 0.3877
Epoch 78: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 112ms/step - loss: 1.7121 - accuracy: 0.3877 - val_loss: 4.1976 - val_accuracy: 0.2625
Epoch 79/105
64/64 [==============================] - ETA: 0s - loss: 1.7005 - accuracy: 0.3867
Epoch 79: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 111ms/step - loss: 1.7005 - accuracy: 0.3867 - val_loss: 12.1819 - val_accuracy: 0.1500
Epoch 80/105
64/64 [==============================] - ETA: 0s - loss: 1.6815 - accuracy: 0.4160
Epoch 80: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 110ms/step - loss: 1.6815 - accuracy: 0.4160 - val_loss: 2.7275 - val_accuracy: 0.2875
Epoch 81/105
64/64 [==============================] - ETA: 0s - loss: 1.7073 - accuracy: 0.4023
Epoch 81: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 111ms/step - loss: 1.7073 - accuracy: 0.4023 - val_loss: 2.3153 - val_accuracy: 0.3375
Epoch 82/105
64/64 [==============================] - ETA: 0s - loss: 1.7259 - accuracy: 0.3945
Epoch 82: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 112ms/step - loss: 1.7259 - accuracy: 0.3945 - val_loss: 1.8843 - val_accuracy: 0.4250
Epoch 83/105
64/64 [==============================] - ETA: 0s - loss: 1.8228 - accuracy: 0.3662
Epoch 83: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 114ms/step - loss: 1.8228 - accuracy: 0.3662 - val_loss: 7.8654 - val_accuracy: 0.2625
Epoch 84/105
64/64 [==============================] - ETA: 0s - loss: 1.7581 - accuracy: 0.3730
Epoch 84: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 111ms/step - loss: 1.7581 - accuracy: 0.3730 - val_loss: 2.6081 - val_accuracy: 0.3562
Epoch 85/105
64/64 [==============================] - ETA: 0s - loss: 1.6946 - accuracy: 0.4189
Epoch 85: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 111ms/step - loss: 1.6946 - accuracy: 0.4189 - val_loss: 2.3701 - val_accuracy: 0.3313
Epoch 86/105
64/64 [==============================] - ETA: 0s - loss: 1.6484 - accuracy: 0.4238
Epoch 86: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 111ms/step - loss: 1.6484 - accuracy: 0.4238 - val_loss: 2.4030 - val_accuracy: 0.3250
Epoch 87/105
64/64 [==============================] - ETA: 0s - loss: 1.5804 - accuracy: 0.4365
Epoch 87: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 114ms/step - loss: 1.5804 - accuracy: 0.4365 - val_loss: 1.8291 - val_accuracy: 0.4313
Epoch 88/105
64/64 [==============================] - ETA: 0s - loss: 1.6702 - accuracy: 0.3984
Epoch 88: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 112ms/step - loss: 1.6702 - accuracy: 0.3984 - val_loss: 2.0837 - val_accuracy: 0.3125
Epoch 89/105
64/64 [==============================] - ETA: 0s - loss: 1.7060 - accuracy: 0.3926
Epoch 89: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 114ms/step - loss: 1.7060 - accuracy: 0.3926 - val_loss: 3.6748 - val_accuracy: 0.1813
Epoch 90/105
64/64 [==============================] - ETA: 0s - loss: 1.6419 - accuracy: 0.4111
Epoch 90: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 111ms/step - loss: 1.6419 - accuracy: 0.4111 - val_loss: 2.0762 - val_accuracy: 0.3438
Epoch 91/105
64/64 [==============================] - ETA: 0s - loss: 1.6250 - accuracy: 0.4248
Epoch 91: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 114ms/step - loss: 1.6250 - accuracy: 0.4248 - val_loss: 2.0953 - val_accuracy: 0.3438
Epoch 92/105
64/64 [==============================] - ETA: 0s - loss: 1.5839 - accuracy: 0.4307
Epoch 92: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 110ms/step - loss: 1.5839 - accuracy: 0.4307 - val_loss: 3.0040 - val_accuracy: 0.2125
Epoch 93/105
64/64 [==============================] - ETA: 0s - loss: 1.6052 - accuracy: 0.4346
Epoch 93: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 114ms/step - loss: 1.6052 - accuracy: 0.4346 - val_loss: 2.1340 - val_accuracy: 0.3750
Epoch 94/105
64/64 [==============================] - ETA: 0s - loss: 1.6357 - accuracy: 0.4297
Epoch 94: val_accuracy improved from 0.43750 to 0.48125, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_1.h5
64/64 [==============================] - 9s 136ms/step - loss: 1.6357 - accuracy: 0.4297 - val_loss: 2.1239 - val_accuracy: 0.4812
Epoch 95/105
64/64 [==============================] - ETA: 0s - loss: 1.5992 - accuracy: 0.4365
Epoch 95: val_accuracy did not improve from 0.48125
64/64 [==============================] - 7s 109ms/step - loss: 1.5992 - accuracy: 0.4365 - val_loss: 3.5609 - val_accuracy: 0.2750
Epoch 96/105
64/64 [==============================] - ETA: 0s - loss: 1.5889 - accuracy: 0.4297
Epoch 96: val_accuracy did not improve from 0.48125
64/64 [==============================] - 7s 110ms/step - loss: 1.5889 - accuracy: 0.4297 - val_loss: 4.5117 - val_accuracy: 0.2812
Epoch 97/105
64/64 [==============================] - ETA: 0s - loss: 1.6302 - accuracy: 0.4268
Epoch 97: val_accuracy did not improve from 0.48125
64/64 [==============================] - 7s 109ms/step - loss: 1.6302 - accuracy: 0.4268 - val_loss: 2.0663 - val_accuracy: 0.3812
Epoch 98/105
64/64 [==============================] - ETA: 0s - loss: 1.5556 - accuracy: 0.4482
Epoch 98: val_accuracy did not improve from 0.48125
64/64 [==============================] - 7s 109ms/step - loss: 1.5556 - accuracy: 0.4482 - val_loss: 2.8550 - val_accuracy: 0.2313
Epoch 99/105
64/64 [==============================] - ETA: 0s - loss: 1.5884 - accuracy: 0.4434
Epoch 99: val_accuracy did not improve from 0.48125
64/64 [==============================] - 7s 110ms/step - loss: 1.5884 - accuracy: 0.4434 - val_loss: 3.0940 - val_accuracy: 0.3875
Epoch 100/105
64/64 [==============================] - ETA: 0s - loss: 1.6178 - accuracy: 0.4395
Epoch 100: val_accuracy did not improve from 0.48125
64/64 [==============================] - 7s 110ms/step - loss: 1.6178 - accuracy: 0.4395 - val_loss: 1.9225 - val_accuracy: 0.3812
Epoch 101/105
64/64 [==============================] - ETA: 0s - loss: 1.5507 - accuracy: 0.4600
Epoch 101: val_accuracy did not improve from 0.48125
64/64 [==============================] - 7s 116ms/step - loss: 1.5507 - accuracy: 0.4600 - val_loss: 1.8372 - val_accuracy: 0.4125
Epoch 102/105
64/64 [==============================] - ETA: 0s - loss: 1.4970 - accuracy: 0.4727
Epoch 102: val_accuracy did not improve from 0.48125
64/64 [==============================] - 7s 108ms/step - loss: 1.4970 - accuracy: 0.4727 - val_loss: 2.1633 - val_accuracy: 0.3375
Epoch 103/105
64/64 [==============================] - ETA: 0s - loss: 1.5522 - accuracy: 0.4473
Epoch 103: val_accuracy did not improve from 0.48125
64/64 [==============================] - 7s 104ms/step - loss: 1.5522 - accuracy: 0.4473 - val_loss: 2.9998 - val_accuracy: 0.2188
Epoch 104/105
64/64 [==============================] - ETA: 0s - loss: 1.4440 - accuracy: 0.4756
Epoch 104: val_accuracy did not improve from 0.48125
64/64 [==============================] - 7s 104ms/step - loss: 1.4440 - accuracy: 0.4756 - val_loss: 3.6900 - val_accuracy: 0.2625
Epoch 105/105
64/64 [==============================] - ETA: 0s - loss: 1.5333 - accuracy: 0.4590
Epoch 105: val_accuracy did not improve from 0.48125
64/64 [==============================] - 7s 103ms/step - loss: 1.5333 - accuracy: 0.4590 - val_loss: 2.4637 - val_accuracy: 0.3625
********* Training time: 1431.578125 s.
*****************
* Model Summary *
*****************
Model: "resnet50"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 224, 224, 3 0 []
)]
conv1_pad (ZeroPadding2D) (None, 230, 230, 3) 0 ['input_1[0][0]']
conv1_conv (Conv2D) (None, 112, 112, 64 9472 ['conv1_pad[0][0]']
)
conv1_bn (BatchNormalization) (None, 112, 112, 64 256 ['conv1_conv[0][0]']
)
conv1_relu (Activation) (None, 112, 112, 64 0 ['conv1_bn[0][0]']
)
pool1_pad (ZeroPadding2D) (None, 114, 114, 64 0 ['conv1_relu[0][0]']
)
pool1_pool (MaxPooling2D) (None, 56, 56, 64) 0 ['pool1_pad[0][0]']
conv2_block1_1_conv (Conv2D) (None, 56, 56, 64) 4160 ['pool1_pool[0][0]']
conv2_block1_1_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block1_1_conv[0][0]']
ization)
conv2_block1_1_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block1_1_bn[0][0]']
n)
conv2_block1_2_conv (Conv2D) (None, 56, 56, 64) 36928 ['conv2_block1_1_relu[0][0]']
conv2_block1_2_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block1_2_conv[0][0]']
ization)
conv2_block1_2_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block1_2_bn[0][0]']
n)
conv2_block1_0_conv (Conv2D) (None, 56, 56, 256) 16640 ['pool1_pool[0][0]']
conv2_block1_3_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block1_2_relu[0][0]']
conv2_block1_0_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block1_0_conv[0][0]']
ization)
conv2_block1_3_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block1_3_conv[0][0]']
ization)
conv2_block1_add (Add) (None, 56, 56, 256) 0 ['conv2_block1_0_bn[0][0]',
'conv2_block1_3_bn[0][0]']
conv2_block1_out (Activation) (None, 56, 56, 256) 0 ['conv2_block1_add[0][0]']
conv2_block2_1_conv (Conv2D) (None, 56, 56, 64) 16448 ['conv2_block1_out[0][0]']
conv2_block2_1_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block2_1_conv[0][0]']
ization)
conv2_block2_1_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block2_1_bn[0][0]']
n)
conv2_block2_2_conv (Conv2D) (None, 56, 56, 64) 36928 ['conv2_block2_1_relu[0][0]']
conv2_block2_2_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block2_2_conv[0][0]']
ization)
conv2_block2_2_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block2_2_bn[0][0]']
n)
conv2_block2_3_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block2_2_relu[0][0]']
conv2_block2_3_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block2_3_conv[0][0]']
ization)
conv2_block2_add (Add) (None, 56, 56, 256) 0 ['conv2_block1_out[0][0]',
'conv2_block2_3_bn[0][0]']
conv2_block2_out (Activation) (None, 56, 56, 256) 0 ['conv2_block2_add[0][0]']
conv2_block3_1_conv (Conv2D) (None, 56, 56, 64) 16448 ['conv2_block2_out[0][0]']
conv2_block3_1_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block3_1_conv[0][0]']
ization)
conv2_block3_1_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block3_1_bn[0][0]']
n)
conv2_block3_2_conv (Conv2D) (None, 56, 56, 64) 36928 ['conv2_block3_1_relu[0][0]']
conv2_block3_2_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block3_2_conv[0][0]']
ization)
conv2_block3_2_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block3_2_bn[0][0]']
n)
conv2_block3_3_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block3_2_relu[0][0]']
conv2_block3_3_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block3_3_conv[0][0]']
ization)
conv2_block3_add (Add) (None, 56, 56, 256) 0 ['conv2_block2_out[0][0]',
'conv2_block3_3_bn[0][0]']
conv2_block3_out (Activation) (None, 56, 56, 256) 0 ['conv2_block3_add[0][0]']
conv3_block1_1_conv (Conv2D) (None, 28, 28, 128) 32896 ['conv2_block3_out[0][0]']
conv3_block1_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block1_1_conv[0][0]']
ization)
conv3_block1_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block1_1_bn[0][0]']
n)
conv3_block1_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block1_1_relu[0][0]']
conv3_block1_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block1_2_conv[0][0]']
ization)
conv3_block1_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block1_2_bn[0][0]']
n)
conv3_block1_0_conv (Conv2D) (None, 28, 28, 512) 131584 ['conv2_block3_out[0][0]']
conv3_block1_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block1_2_relu[0][0]']
conv3_block1_0_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block1_0_conv[0][0]']
ization)
conv3_block1_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block1_3_conv[0][0]']
ization)
conv3_block1_add (Add) (None, 28, 28, 512) 0 ['conv3_block1_0_bn[0][0]',
'conv3_block1_3_bn[0][0]']
conv3_block1_out (Activation) (None, 28, 28, 512) 0 ['conv3_block1_add[0][0]']
conv3_block2_1_conv (Conv2D) (None, 28, 28, 128) 65664 ['conv3_block1_out[0][0]']
conv3_block2_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block2_1_conv[0][0]']
ization)
conv3_block2_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block2_1_bn[0][0]']
n)
conv3_block2_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block2_1_relu[0][0]']
conv3_block2_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block2_2_conv[0][0]']
ization)
conv3_block2_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block2_2_bn[0][0]']
n)
conv3_block2_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block2_2_relu[0][0]']
conv3_block2_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block2_3_conv[0][0]']
ization)
conv3_block2_add (Add) (None, 28, 28, 512) 0 ['conv3_block1_out[0][0]',
'conv3_block2_3_bn[0][0]']
conv3_block2_out (Activation) (None, 28, 28, 512) 0 ['conv3_block2_add[0][0]']
conv3_block3_1_conv (Conv2D) (None, 28, 28, 128) 65664 ['conv3_block2_out[0][0]']
conv3_block3_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block3_1_conv[0][0]']
ization)
conv3_block3_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block3_1_bn[0][0]']
n)
conv3_block3_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block3_1_relu[0][0]']
conv3_block3_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block3_2_conv[0][0]']
ization)
conv3_block3_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block3_2_bn[0][0]']
n)
conv3_block3_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block3_2_relu[0][0]']
conv3_block3_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block3_3_conv[0][0]']
ization)
conv3_block3_add (Add) (None, 28, 28, 512) 0 ['conv3_block2_out[0][0]',
'conv3_block3_3_bn[0][0]']
conv3_block3_out (Activation) (None, 28, 28, 512) 0 ['conv3_block3_add[0][0]']
conv3_block4_1_conv (Conv2D) (None, 28, 28, 128) 65664 ['conv3_block3_out[0][0]']
conv3_block4_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block4_1_conv[0][0]']
ization)
conv3_block4_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block4_1_bn[0][0]']
n)
conv3_block4_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block4_1_relu[0][0]']
conv3_block4_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block4_2_conv[0][0]']
ization)
conv3_block4_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block4_2_bn[0][0]']
n)
conv3_block4_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block4_2_relu[0][0]']
conv3_block4_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block4_3_conv[0][0]']
ization)
conv3_block4_add (Add) (None, 28, 28, 512) 0 ['conv3_block3_out[0][0]',
'conv3_block4_3_bn[0][0]']
conv3_block4_out (Activation) (None, 28, 28, 512) 0 ['conv3_block4_add[0][0]']
conv4_block1_1_conv (Conv2D) (None, 14, 14, 256) 131328 ['conv3_block4_out[0][0]']
conv4_block1_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block1_1_conv[0][0]']
ization)
conv4_block1_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block1_1_bn[0][0]']
n)
conv4_block1_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block1_1_relu[0][0]']
conv4_block1_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block1_2_conv[0][0]']
ization)
conv4_block1_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block1_2_bn[0][0]']
n)
conv4_block1_0_conv (Conv2D) (None, 14, 14, 1024 525312 ['conv3_block4_out[0][0]']
)
conv4_block1_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block1_2_relu[0][0]']
)
conv4_block1_0_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block1_0_conv[0][0]']
ization) )
conv4_block1_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block1_3_conv[0][0]']
ization) )
conv4_block1_add (Add) (None, 14, 14, 1024 0 ['conv4_block1_0_bn[0][0]',
) 'conv4_block1_3_bn[0][0]']
conv4_block1_out (Activation) (None, 14, 14, 1024 0 ['conv4_block1_add[0][0]']
)
conv4_block2_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block1_out[0][0]']
conv4_block2_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block2_1_conv[0][0]']
ization)
conv4_block2_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block2_1_bn[0][0]']
n)
conv4_block2_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block2_1_relu[0][0]']
conv4_block2_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block2_2_conv[0][0]']
ization)
conv4_block2_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block2_2_bn[0][0]']
n)
conv4_block2_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block2_2_relu[0][0]']
)
conv4_block2_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block2_3_conv[0][0]']
ization) )
conv4_block2_add (Add) (None, 14, 14, 1024 0 ['conv4_block1_out[0][0]',
) 'conv4_block2_3_bn[0][0]']
conv4_block2_out (Activation) (None, 14, 14, 1024 0 ['conv4_block2_add[0][0]']
)
conv4_block3_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block2_out[0][0]']
conv4_block3_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block3_1_conv[0][0]']
ization)
conv4_block3_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block3_1_bn[0][0]']
n)
conv4_block3_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block3_1_relu[0][0]']
conv4_block3_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block3_2_conv[0][0]']
ization)
conv4_block3_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block3_2_bn[0][0]']
n)
conv4_block3_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block3_2_relu[0][0]']
)
conv4_block3_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block3_3_conv[0][0]']
ization) )
conv4_block3_add (Add) (None, 14, 14, 1024 0 ['conv4_block2_out[0][0]',
) 'conv4_block3_3_bn[0][0]']
conv4_block3_out (Activation) (None, 14, 14, 1024 0 ['conv4_block3_add[0][0]']
)
conv4_block4_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block3_out[0][0]']
conv4_block4_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block4_1_conv[0][0]']
ization)
conv4_block4_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block4_1_bn[0][0]']
n)
conv4_block4_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block4_1_relu[0][0]']
conv4_block4_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block4_2_conv[0][0]']
ization)
conv4_block4_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block4_2_bn[0][0]']
n)
conv4_block4_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block4_2_relu[0][0]']
)
conv4_block4_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block4_3_conv[0][0]']
ization) )
conv4_block4_add (Add) (None, 14, 14, 1024 0 ['conv4_block3_out[0][0]',
) 'conv4_block4_3_bn[0][0]']
conv4_block4_out (Activation) (None, 14, 14, 1024 0 ['conv4_block4_add[0][0]']
)
conv4_block5_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block4_out[0][0]']
conv4_block5_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block5_1_conv[0][0]']
ization)
conv4_block5_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block5_1_bn[0][0]']
n)
conv4_block5_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block5_1_relu[0][0]']
conv4_block5_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block5_2_conv[0][0]']
ization)
conv4_block5_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block5_2_bn[0][0]']
n)
conv4_block5_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block5_2_relu[0][0]']
)
conv4_block5_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block5_3_conv[0][0]']
ization) )
conv4_block5_add (Add) (None, 14, 14, 1024 0 ['conv4_block4_out[0][0]',
) 'conv4_block5_3_bn[0][0]']
conv4_block5_out (Activation) (None, 14, 14, 1024 0 ['conv4_block5_add[0][0]']
)
conv4_block6_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block5_out[0][0]']
conv4_block6_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block6_1_conv[0][0]']
ization)
conv4_block6_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block6_1_bn[0][0]']
n)
conv4_block6_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block6_1_relu[0][0]']
conv4_block6_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block6_2_conv[0][0]']
ization)
conv4_block6_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block6_2_bn[0][0]']
n)
conv4_block6_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block6_2_relu[0][0]']
)
conv4_block6_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block6_3_conv[0][0]']
ization) )
conv4_block6_add (Add) (None, 14, 14, 1024 0 ['conv4_block5_out[0][0]',
) 'conv4_block6_3_bn[0][0]']
conv4_block6_out (Activation) (None, 14, 14, 1024 0 ['conv4_block6_add[0][0]']
)
conv5_block1_1_conv (Conv2D) (None, 7, 7, 512) 524800 ['conv4_block6_out[0][0]']
conv5_block1_1_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block1_1_conv[0][0]']
ization)
conv5_block1_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block1_1_bn[0][0]']
n)
conv5_block1_2_conv (Conv2D) (None, 7, 7, 512) 2359808 ['conv5_block1_1_relu[0][0]']
conv5_block1_2_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block1_2_conv[0][0]']
ization)
conv5_block1_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block1_2_bn[0][0]']
n)
conv5_block1_0_conv (Conv2D) (None, 7, 7, 2048) 2099200 ['conv4_block6_out[0][0]']
conv5_block1_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block1_2_relu[0][0]']
conv5_block1_0_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block1_0_conv[0][0]']
ization)
conv5_block1_3_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block1_3_conv[0][0]']
ization)
conv5_block1_add (Add) (None, 7, 7, 2048) 0 ['conv5_block1_0_bn[0][0]',
'conv5_block1_3_bn[0][0]']
conv5_block1_out (Activation) (None, 7, 7, 2048) 0 ['conv5_block1_add[0][0]']
conv5_block2_1_conv (Conv2D) (None, 7, 7, 512) 1049088 ['conv5_block1_out[0][0]']
conv5_block2_1_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block2_1_conv[0][0]']
ization)
conv5_block2_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block2_1_bn[0][0]']
n)
conv5_block2_2_conv (Conv2D) (None, 7, 7, 512) 2359808 ['conv5_block2_1_relu[0][0]']
conv5_block2_2_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block2_2_conv[0][0]']
ization)
conv5_block2_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block2_2_bn[0][0]']
n)
conv5_block2_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block2_2_relu[0][0]']
conv5_block2_3_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block2_3_conv[0][0]']
ization)
conv5_block2_add (Add) (None, 7, 7, 2048) 0 ['conv5_block1_out[0][0]',
'conv5_block2_3_bn[0][0]']
conv5_block2_out (Activation) (None, 7, 7, 2048) 0 ['conv5_block2_add[0][0]']
conv5_block3_1_conv (Conv2D) (None, 7, 7, 512) 1049088 ['conv5_block2_out[0][0]']
conv5_block3_1_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block3_1_conv[0][0]']
ization)
conv5_block3_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block3_1_bn[0][0]']
n)
conv5_block3_2_conv (Conv2D) (None, 7, 7, 512) 2359808 ['conv5_block3_1_relu[0][0]']
conv5_block3_2_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block3_2_conv[0][0]']
ization)
conv5_block3_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block3_2_bn[0][0]']
n)
conv5_block3_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block3_2_relu[0][0]']
conv5_block3_3_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block3_3_conv[0][0]']
ization)
conv5_block3_add (Add) (None, 7, 7, 2048) 0 ['conv5_block2_out[0][0]',
'conv5_block3_3_bn[0][0]']
conv5_block3_out (Activation) (None, 7, 7, 2048) 0 ['conv5_block3_add[0][0]']
avg_pool (GlobalAveragePooling (None, 2048) 0 ['conv5_block3_out[0][0]']
2D)
predictions (Dense) (None, 12) 24588 ['avg_pool[0][0]']
==================================================================================================
Total params: 23,612,300
Trainable params: 23,559,180
Non-trainable params: 53,120
__________________________________________________________________________________________________
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 3600 validated image filenames belonging to 12 classes.
225/225 [==============================] - 7s 29ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.6504 0.4820 0.5537 305
anarrhichomenum 0.5552 0.5932 0.5736 322
brevantherum 0.2778 0.3195 0.2972 266
dulcamara 0.4500 0.0289 0.0544 311
herposolanum 0.1684 0.4767 0.2488 279
holophylla 0.6028 0.3014 0.4019 282
lasiocarpa 0.6964 0.5495 0.6143 313
melongena 0.3437 0.3885 0.3647 314
micracantha 0.2135 0.3813 0.2737 299
petota 0.2681 0.4700 0.3414 300
solanum 0.4595 0.0559 0.0997 304
torva 0.3784 0.0918 0.1478 305
accuracy 0.3456 3600
macro avg 0.4220 0.3449 0.3309 3600
weighted avg 0.4259 0.3456 0.3330 3600
********************
* Confusion Matrix *
********************
************************************** * Train/Val Accuracy and Loss graphs * **************************************
*****************************
* Started at 1455.921875... *
*****************************
Found 14400 validated image filenames belonging to 12 classes.
Found 3600 validated image filenames belonging to 12 classes.
Epoch 1/105
64/64 [==============================] - ETA: 0s - loss: 3.9492 - accuracy: 0.0811
Epoch 1: val_accuracy improved from -inf to 0.08125, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_2.h5
64/64 [==============================] - 12s 123ms/step - loss: 3.9492 - accuracy: 0.0811 - val_loss: 14135.5830 - val_accuracy: 0.0812
Epoch 2/105
64/64 [==============================] - ETA: 0s - loss: 2.8075 - accuracy: 0.1084
Epoch 2: val_accuracy did not improve from 0.08125
64/64 [==============================] - 7s 103ms/step - loss: 2.8075 - accuracy: 0.1084 - val_loss: 44.9530 - val_accuracy: 0.0812
Epoch 3/105
64/64 [==============================] - ETA: 0s - loss: 2.5349 - accuracy: 0.1465
Epoch 3: val_accuracy did not improve from 0.08125
64/64 [==============================] - 7s 103ms/step - loss: 2.5349 - accuracy: 0.1465 - val_loss: 3.3257 - val_accuracy: 0.0688
Epoch 4/105
64/64 [==============================] - ETA: 0s - loss: 2.4418 - accuracy: 0.1338
Epoch 4: val_accuracy improved from 0.08125 to 0.10000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_2.h5
64/64 [==============================] - 8s 126ms/step - loss: 2.4418 - accuracy: 0.1338 - val_loss: 2.4350 - val_accuracy: 0.1000
Epoch 5/105
64/64 [==============================] - ETA: 0s - loss: 2.3709 - accuracy: 0.1318
Epoch 5: val_accuracy improved from 0.10000 to 0.10625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_2.h5
64/64 [==============================] - 8s 130ms/step - loss: 2.3709 - accuracy: 0.1318 - val_loss: 2.4182 - val_accuracy: 0.1063
Epoch 6/105
64/64 [==============================] - ETA: 0s - loss: 2.3991 - accuracy: 0.1543
Epoch 6: val_accuracy improved from 0.10625 to 0.11875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_2.h5
64/64 [==============================] - 8s 126ms/step - loss: 2.3991 - accuracy: 0.1543 - val_loss: 7.9676 - val_accuracy: 0.1187
Epoch 7/105
64/64 [==============================] - ETA: 0s - loss: 2.3467 - accuracy: 0.1875
Epoch 7: val_accuracy improved from 0.11875 to 0.14375, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_2.h5
64/64 [==============================] - 8s 121ms/step - loss: 2.3467 - accuracy: 0.1875 - val_loss: 2.7334 - val_accuracy: 0.1437
Epoch 8/105
64/64 [==============================] - ETA: 0s - loss: 2.3497 - accuracy: 0.1582
Epoch 8: val_accuracy did not improve from 0.14375
64/64 [==============================] - 7s 103ms/step - loss: 2.3497 - accuracy: 0.1582 - val_loss: 2.7379 - val_accuracy: 0.1187
Epoch 9/105
64/64 [==============================] - ETA: 0s - loss: 2.3157 - accuracy: 0.1641
Epoch 9: val_accuracy improved from 0.14375 to 0.17500, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_2.h5
64/64 [==============================] - 8s 130ms/step - loss: 2.3157 - accuracy: 0.1641 - val_loss: 2.3707 - val_accuracy: 0.1750
Epoch 10/105
64/64 [==============================] - ETA: 0s - loss: 2.2658 - accuracy: 0.1875
Epoch 10: val_accuracy did not improve from 0.17500
64/64 [==============================] - 7s 103ms/step - loss: 2.2658 - accuracy: 0.1875 - val_loss: 3.3253 - val_accuracy: 0.0938
Epoch 11/105
64/64 [==============================] - ETA: 0s - loss: 2.2826 - accuracy: 0.1787
Epoch 11: val_accuracy improved from 0.17500 to 0.20000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_2.h5
64/64 [==============================] - 8s 122ms/step - loss: 2.2826 - accuracy: 0.1787 - val_loss: 2.6546 - val_accuracy: 0.2000
Epoch 12/105
64/64 [==============================] - ETA: 0s - loss: 2.2375 - accuracy: 0.1973
Epoch 12: val_accuracy did not improve from 0.20000
64/64 [==============================] - 7s 103ms/step - loss: 2.2375 - accuracy: 0.1973 - val_loss: 2.7180 - val_accuracy: 0.1875
Epoch 13/105
64/64 [==============================] - ETA: 0s - loss: 2.1769 - accuracy: 0.2051
Epoch 13: val_accuracy did not improve from 0.20000
64/64 [==============================] - 7s 103ms/step - loss: 2.1769 - accuracy: 0.2051 - val_loss: 4.4300 - val_accuracy: 0.1125
Epoch 14/105
64/64 [==============================] - ETA: 0s - loss: 2.1638 - accuracy: 0.2266
Epoch 14: val_accuracy did not improve from 0.20000
64/64 [==============================] - 7s 103ms/step - loss: 2.1638 - accuracy: 0.2266 - val_loss: 4.3419 - val_accuracy: 0.1000
Epoch 15/105
64/64 [==============================] - ETA: 0s - loss: 2.2292 - accuracy: 0.1934
Epoch 15: val_accuracy did not improve from 0.20000
64/64 [==============================] - 7s 103ms/step - loss: 2.2292 - accuracy: 0.1934 - val_loss: 2.3835 - val_accuracy: 0.1937
Epoch 16/105
64/64 [==============================] - ETA: 0s - loss: 2.1595 - accuracy: 0.2178
Epoch 16: val_accuracy did not improve from 0.20000
64/64 [==============================] - 7s 103ms/step - loss: 2.1595 - accuracy: 0.2178 - val_loss: 2.4833 - val_accuracy: 0.1312
Epoch 17/105
64/64 [==============================] - ETA: 0s - loss: 2.2145 - accuracy: 0.2393
Epoch 17: val_accuracy did not improve from 0.20000
64/64 [==============================] - 7s 102ms/step - loss: 2.2145 - accuracy: 0.2393 - val_loss: 2.5687 - val_accuracy: 0.1375
Epoch 18/105
64/64 [==============================] - ETA: 0s - loss: 2.1662 - accuracy: 0.2334
Epoch 18: val_accuracy did not improve from 0.20000
64/64 [==============================] - 7s 103ms/step - loss: 2.1662 - accuracy: 0.2334 - val_loss: 7.8775 - val_accuracy: 0.1000
Epoch 19/105
64/64 [==============================] - ETA: 0s - loss: 2.1583 - accuracy: 0.2148
Epoch 19: val_accuracy did not improve from 0.20000
64/64 [==============================] - 7s 103ms/step - loss: 2.1583 - accuracy: 0.2148 - val_loss: 2.8118 - val_accuracy: 0.1562
Epoch 20/105
64/64 [==============================] - ETA: 0s - loss: 2.1106 - accuracy: 0.2354
Epoch 20: val_accuracy did not improve from 0.20000
64/64 [==============================] - 7s 103ms/step - loss: 2.1106 - accuracy: 0.2354 - val_loss: 8.0629 - val_accuracy: 0.1250
Epoch 21/105
64/64 [==============================] - ETA: 0s - loss: 2.1213 - accuracy: 0.2559
Epoch 21: val_accuracy did not improve from 0.20000
64/64 [==============================] - 7s 102ms/step - loss: 2.1213 - accuracy: 0.2559 - val_loss: 2.5138 - val_accuracy: 0.1813
Epoch 22/105
64/64 [==============================] - ETA: 0s - loss: 2.0591 - accuracy: 0.2666
Epoch 22: val_accuracy did not improve from 0.20000
64/64 [==============================] - 7s 102ms/step - loss: 2.0591 - accuracy: 0.2666 - val_loss: 2.5504 - val_accuracy: 0.1875
Epoch 23/105
64/64 [==============================] - ETA: 0s - loss: 2.0989 - accuracy: 0.2568
Epoch 23: val_accuracy did not improve from 0.20000
64/64 [==============================] - 7s 102ms/step - loss: 2.0989 - accuracy: 0.2568 - val_loss: 4.8717 - val_accuracy: 0.0938
Epoch 24/105
64/64 [==============================] - ETA: 0s - loss: 2.1386 - accuracy: 0.2354
Epoch 24: val_accuracy improved from 0.20000 to 0.21250, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_2.h5
64/64 [==============================] - 8s 127ms/step - loss: 2.1386 - accuracy: 0.2354 - val_loss: 2.2663 - val_accuracy: 0.2125
Epoch 25/105
64/64 [==============================] - ETA: 0s - loss: 2.0665 - accuracy: 0.2686
Epoch 25: val_accuracy improved from 0.21250 to 0.22500, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_2.h5
64/64 [==============================] - 8s 121ms/step - loss: 2.0665 - accuracy: 0.2686 - val_loss: 3.0575 - val_accuracy: 0.2250
Epoch 26/105
64/64 [==============================] - ETA: 0s - loss: 2.1153 - accuracy: 0.2363
Epoch 26: val_accuracy did not improve from 0.22500
64/64 [==============================] - 7s 103ms/step - loss: 2.1153 - accuracy: 0.2363 - val_loss: 3.3487 - val_accuracy: 0.2062
Epoch 27/105
64/64 [==============================] - ETA: 0s - loss: 2.0457 - accuracy: 0.2539
Epoch 27: val_accuracy did not improve from 0.22500
64/64 [==============================] - 7s 104ms/step - loss: 2.0457 - accuracy: 0.2539 - val_loss: 2.5023 - val_accuracy: 0.2125
Epoch 28/105
64/64 [==============================] - ETA: 0s - loss: 2.0508 - accuracy: 0.2695
Epoch 28: val_accuracy did not improve from 0.22500
64/64 [==============================] - 7s 103ms/step - loss: 2.0508 - accuracy: 0.2695 - val_loss: 3.3212 - val_accuracy: 0.2000
Epoch 29/105
64/64 [==============================] - ETA: 0s - loss: 2.0522 - accuracy: 0.2812
Epoch 29: val_accuracy did not improve from 0.22500
64/64 [==============================] - 7s 103ms/step - loss: 2.0522 - accuracy: 0.2812 - val_loss: 3.6510 - val_accuracy: 0.1500
Epoch 30/105
64/64 [==============================] - ETA: 0s - loss: 1.9626 - accuracy: 0.2832
Epoch 30: val_accuracy did not improve from 0.22500
64/64 [==============================] - 7s 108ms/step - loss: 1.9626 - accuracy: 0.2832 - val_loss: 2.9248 - val_accuracy: 0.1937
Epoch 31/105
64/64 [==============================] - ETA: 0s - loss: 1.9844 - accuracy: 0.2783
Epoch 31: val_accuracy did not improve from 0.22500
64/64 [==============================] - 7s 104ms/step - loss: 1.9844 - accuracy: 0.2783 - val_loss: 2.7748 - val_accuracy: 0.2250
Epoch 32/105
64/64 [==============================] - ETA: 0s - loss: 1.9672 - accuracy: 0.3096
Epoch 32: val_accuracy improved from 0.22500 to 0.29375, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_2.h5
64/64 [==============================] - 8s 121ms/step - loss: 1.9672 - accuracy: 0.3096 - val_loss: 2.2173 - val_accuracy: 0.2937
Epoch 33/105
64/64 [==============================] - ETA: 0s - loss: 1.9647 - accuracy: 0.2861
Epoch 33: val_accuracy did not improve from 0.29375
64/64 [==============================] - 7s 103ms/step - loss: 1.9647 - accuracy: 0.2861 - val_loss: 2.8646 - val_accuracy: 0.1688
Epoch 34/105
64/64 [==============================] - ETA: 0s - loss: 1.9664 - accuracy: 0.2793
Epoch 34: val_accuracy did not improve from 0.29375
64/64 [==============================] - 7s 103ms/step - loss: 1.9664 - accuracy: 0.2793 - val_loss: 2.8372 - val_accuracy: 0.2250
Epoch 35/105
64/64 [==============================] - ETA: 0s - loss: 1.9486 - accuracy: 0.2939
Epoch 35: val_accuracy did not improve from 0.29375
64/64 [==============================] - 7s 102ms/step - loss: 1.9486 - accuracy: 0.2939 - val_loss: 2.5567 - val_accuracy: 0.2375
Epoch 36/105
64/64 [==============================] - ETA: 0s - loss: 1.9454 - accuracy: 0.2988
Epoch 36: val_accuracy did not improve from 0.29375
64/64 [==============================] - 7s 104ms/step - loss: 1.9454 - accuracy: 0.2988 - val_loss: 3.9853 - val_accuracy: 0.1187
Epoch 37/105
64/64 [==============================] - ETA: 0s - loss: 1.9873 - accuracy: 0.3086
Epoch 37: val_accuracy did not improve from 0.29375
64/64 [==============================] - 7s 104ms/step - loss: 1.9873 - accuracy: 0.3086 - val_loss: 3.4425 - val_accuracy: 0.1500
Epoch 38/105
64/64 [==============================] - ETA: 0s - loss: 1.9810 - accuracy: 0.2910
Epoch 38: val_accuracy did not improve from 0.29375
64/64 [==============================] - 7s 102ms/step - loss: 1.9810 - accuracy: 0.2910 - val_loss: 2.9893 - val_accuracy: 0.1750
Epoch 39/105
64/64 [==============================] - ETA: 0s - loss: 1.9651 - accuracy: 0.3135
Epoch 39: val_accuracy did not improve from 0.29375
64/64 [==============================] - 7s 103ms/step - loss: 1.9651 - accuracy: 0.3135 - val_loss: 2.2778 - val_accuracy: 0.2313
Epoch 40/105
64/64 [==============================] - ETA: 0s - loss: 1.9322 - accuracy: 0.3154
Epoch 40: val_accuracy did not improve from 0.29375
64/64 [==============================] - 7s 102ms/step - loss: 1.9322 - accuracy: 0.3154 - val_loss: 4.9351 - val_accuracy: 0.1187
Epoch 41/105
64/64 [==============================] - ETA: 0s - loss: 1.9389 - accuracy: 0.3105
Epoch 41: val_accuracy did not improve from 0.29375
64/64 [==============================] - 7s 103ms/step - loss: 1.9389 - accuracy: 0.3105 - val_loss: 3.1086 - val_accuracy: 0.1875
Epoch 42/105
64/64 [==============================] - ETA: 0s - loss: 1.9809 - accuracy: 0.2998
Epoch 42: val_accuracy did not improve from 0.29375
64/64 [==============================] - 7s 104ms/step - loss: 1.9809 - accuracy: 0.2998 - val_loss: 2.0711 - val_accuracy: 0.2562
Epoch 43/105
64/64 [==============================] - ETA: 0s - loss: 1.8847 - accuracy: 0.3135
Epoch 43: val_accuracy did not improve from 0.29375
64/64 [==============================] - 7s 102ms/step - loss: 1.8847 - accuracy: 0.3135 - val_loss: 2.5209 - val_accuracy: 0.2500
Epoch 44/105
64/64 [==============================] - ETA: 0s - loss: 1.9466 - accuracy: 0.2930
Epoch 44: val_accuracy did not improve from 0.29375
64/64 [==============================] - 7s 103ms/step - loss: 1.9466 - accuracy: 0.2930 - val_loss: 2.3347 - val_accuracy: 0.2750
Epoch 45/105
64/64 [==============================] - ETA: 0s - loss: 1.9199 - accuracy: 0.2969
Epoch 45: val_accuracy did not improve from 0.29375
64/64 [==============================] - 7s 108ms/step - loss: 1.9199 - accuracy: 0.2969 - val_loss: 8.2957 - val_accuracy: 0.1063
Epoch 46/105
64/64 [==============================] - ETA: 0s - loss: 1.9653 - accuracy: 0.2793
Epoch 46: val_accuracy did not improve from 0.29375
64/64 [==============================] - 7s 103ms/step - loss: 1.9653 - accuracy: 0.2793 - val_loss: 2.7098 - val_accuracy: 0.1437
Epoch 47/105
64/64 [==============================] - ETA: 0s - loss: 1.9521 - accuracy: 0.3076
Epoch 47: val_accuracy did not improve from 0.29375
64/64 [==============================] - 7s 109ms/step - loss: 1.9521 - accuracy: 0.3076 - val_loss: 3.6050 - val_accuracy: 0.1500
Epoch 48/105
64/64 [==============================] - ETA: 0s - loss: 1.9922 - accuracy: 0.2803
Epoch 48: val_accuracy did not improve from 0.29375
64/64 [==============================] - 7s 108ms/step - loss: 1.9922 - accuracy: 0.2803 - val_loss: 2.4093 - val_accuracy: 0.2250
Epoch 49/105
64/64 [==============================] - ETA: 0s - loss: 1.8879 - accuracy: 0.3223
Epoch 49: val_accuracy did not improve from 0.29375
64/64 [==============================] - 7s 112ms/step - loss: 1.8879 - accuracy: 0.3223 - val_loss: 2.2718 - val_accuracy: 0.2188
Epoch 50/105
64/64 [==============================] - ETA: 0s - loss: 1.9060 - accuracy: 0.3350
Epoch 50: val_accuracy did not improve from 0.29375
64/64 [==============================] - 7s 109ms/step - loss: 1.9060 - accuracy: 0.3350 - val_loss: 2.1596 - val_accuracy: 0.1937
Epoch 51/105
64/64 [==============================] - ETA: 0s - loss: 1.9327 - accuracy: 0.3135
Epoch 51: val_accuracy did not improve from 0.29375
64/64 [==============================] - 7s 109ms/step - loss: 1.9327 - accuracy: 0.3135 - val_loss: 2.4265 - val_accuracy: 0.2812
Epoch 52/105
64/64 [==============================] - ETA: 0s - loss: 1.8341 - accuracy: 0.3486
Epoch 52: val_accuracy improved from 0.29375 to 0.34375, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_2.h5
64/64 [==============================] - 8s 129ms/step - loss: 1.8341 - accuracy: 0.3486 - val_loss: 2.0512 - val_accuracy: 0.3438
Epoch 53/105
64/64 [==============================] - ETA: 0s - loss: 1.9426 - accuracy: 0.3105
Epoch 53: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 112ms/step - loss: 1.9426 - accuracy: 0.3105 - val_loss: 14.7166 - val_accuracy: 0.0812
Epoch 54/105
64/64 [==============================] - ETA: 0s - loss: 1.9492 - accuracy: 0.3203
Epoch 54: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 109ms/step - loss: 1.9492 - accuracy: 0.3203 - val_loss: 2.2813 - val_accuracy: 0.3063
Epoch 55/105
64/64 [==============================] - ETA: 0s - loss: 1.8767 - accuracy: 0.3164
Epoch 55: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 110ms/step - loss: 1.8767 - accuracy: 0.3164 - val_loss: 5.5869 - val_accuracy: 0.1500
Epoch 56/105
64/64 [==============================] - ETA: 0s - loss: 1.8768 - accuracy: 0.3203
Epoch 56: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 111ms/step - loss: 1.8768 - accuracy: 0.3203 - val_loss: 2.4572 - val_accuracy: 0.2812
Epoch 57/105
64/64 [==============================] - ETA: 0s - loss: 1.8180 - accuracy: 0.3506
Epoch 57: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 115ms/step - loss: 1.8180 - accuracy: 0.3506 - val_loss: 3.6784 - val_accuracy: 0.1813
Epoch 58/105
64/64 [==============================] - ETA: 0s - loss: 1.8380 - accuracy: 0.3398
Epoch 58: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 112ms/step - loss: 1.8380 - accuracy: 0.3398 - val_loss: 2.2510 - val_accuracy: 0.2500
Epoch 59/105
64/64 [==============================] - ETA: 0s - loss: 1.7936 - accuracy: 0.3604
Epoch 59: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 112ms/step - loss: 1.7936 - accuracy: 0.3604 - val_loss: 1.9983 - val_accuracy: 0.3125
Epoch 60/105
64/64 [==============================] - ETA: 0s - loss: 1.8365 - accuracy: 0.3359
Epoch 60: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 110ms/step - loss: 1.8365 - accuracy: 0.3359 - val_loss: 7.2656 - val_accuracy: 0.1500
Epoch 61/105
64/64 [==============================] - ETA: 0s - loss: 1.8503 - accuracy: 0.3516
Epoch 61: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 116ms/step - loss: 1.8503 - accuracy: 0.3516 - val_loss: 2.4111 - val_accuracy: 0.2250
Epoch 62/105
64/64 [==============================] - ETA: 0s - loss: 1.8371 - accuracy: 0.3594
Epoch 62: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 113ms/step - loss: 1.8371 - accuracy: 0.3594 - val_loss: 2.0011 - val_accuracy: 0.3000
Epoch 63/105
64/64 [==============================] - ETA: 0s - loss: 1.7545 - accuracy: 0.3730
Epoch 63: val_accuracy did not improve from 0.34375
64/64 [==============================] - 8s 118ms/step - loss: 1.7545 - accuracy: 0.3730 - val_loss: 2.0879 - val_accuracy: 0.3063
Epoch 64/105
64/64 [==============================] - ETA: 0s - loss: 1.8539 - accuracy: 0.3398
Epoch 64: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 111ms/step - loss: 1.8539 - accuracy: 0.3398 - val_loss: 2.3298 - val_accuracy: 0.2812
Epoch 65/105
64/64 [==============================] - ETA: 0s - loss: 1.8061 - accuracy: 0.3262
Epoch 65: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 110ms/step - loss: 1.8061 - accuracy: 0.3262 - val_loss: 1.8843 - val_accuracy: 0.3313
Epoch 66/105
64/64 [==============================] - ETA: 0s - loss: 1.8332 - accuracy: 0.3623
Epoch 66: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 110ms/step - loss: 1.8332 - accuracy: 0.3623 - val_loss: 1.9289 - val_accuracy: 0.3187
Epoch 67/105
64/64 [==============================] - ETA: 0s - loss: 1.8059 - accuracy: 0.3604
Epoch 67: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 110ms/step - loss: 1.8059 - accuracy: 0.3604 - val_loss: 2.0810 - val_accuracy: 0.3063
Epoch 68/105
64/64 [==============================] - ETA: 0s - loss: 1.7866 - accuracy: 0.3652
Epoch 68: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 111ms/step - loss: 1.7866 - accuracy: 0.3652 - val_loss: 2.2435 - val_accuracy: 0.2875
Epoch 69/105
64/64 [==============================] - ETA: 0s - loss: 1.8386 - accuracy: 0.3418
Epoch 69: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 110ms/step - loss: 1.8386 - accuracy: 0.3418 - val_loss: 2.5762 - val_accuracy: 0.2125
Epoch 70/105
64/64 [==============================] - ETA: 0s - loss: 1.8262 - accuracy: 0.3525
Epoch 70: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 110ms/step - loss: 1.8262 - accuracy: 0.3525 - val_loss: 2.9027 - val_accuracy: 0.2062
Epoch 71/105
64/64 [==============================] - ETA: 0s - loss: 1.7733 - accuracy: 0.3916
Epoch 71: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 110ms/step - loss: 1.7733 - accuracy: 0.3916 - val_loss: 2.2186 - val_accuracy: 0.2812
Epoch 72/105
64/64 [==============================] - ETA: 0s - loss: 1.7191 - accuracy: 0.3838
Epoch 72: val_accuracy improved from 0.34375 to 0.35000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_2.h5
64/64 [==============================] - 9s 138ms/step - loss: 1.7191 - accuracy: 0.3838 - val_loss: 1.8722 - val_accuracy: 0.3500
Epoch 73/105
64/64 [==============================] - ETA: 0s - loss: 1.7893 - accuracy: 0.3779
Epoch 73: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 108ms/step - loss: 1.7893 - accuracy: 0.3779 - val_loss: 1.9282 - val_accuracy: 0.2812
Epoch 74/105
64/64 [==============================] - ETA: 0s - loss: 1.7682 - accuracy: 0.3643
Epoch 74: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 108ms/step - loss: 1.7682 - accuracy: 0.3643 - val_loss: 2.6997 - val_accuracy: 0.2000
Epoch 75/105
64/64 [==============================] - ETA: 0s - loss: 1.7050 - accuracy: 0.4033
Epoch 75: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 108ms/step - loss: 1.7050 - accuracy: 0.4033 - val_loss: 3.1208 - val_accuracy: 0.2375
Epoch 76/105
64/64 [==============================] - ETA: 0s - loss: 1.7500 - accuracy: 0.3809
Epoch 76: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 108ms/step - loss: 1.7500 - accuracy: 0.3809 - val_loss: 4.3332 - val_accuracy: 0.1562
Epoch 77/105
64/64 [==============================] - ETA: 0s - loss: 1.7377 - accuracy: 0.3828
Epoch 77: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 108ms/step - loss: 1.7377 - accuracy: 0.3828 - val_loss: 2.6843 - val_accuracy: 0.2500
Epoch 78/105
64/64 [==============================] - ETA: 0s - loss: 1.7189 - accuracy: 0.4092
Epoch 78: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 116ms/step - loss: 1.7189 - accuracy: 0.4092 - val_loss: 1.9775 - val_accuracy: 0.3125
Epoch 79/105
64/64 [==============================] - ETA: 0s - loss: 1.7388 - accuracy: 0.3994
Epoch 79: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 113ms/step - loss: 1.7388 - accuracy: 0.3994 - val_loss: 2.8636 - val_accuracy: 0.1937
Epoch 80/105
64/64 [==============================] - ETA: 0s - loss: 1.6868 - accuracy: 0.3838
Epoch 80: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 107ms/step - loss: 1.6868 - accuracy: 0.3838 - val_loss: 4.6486 - val_accuracy: 0.1875
Epoch 81/105
64/64 [==============================] - ETA: 0s - loss: 1.7512 - accuracy: 0.3926
Epoch 81: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 102ms/step - loss: 1.7512 - accuracy: 0.3926 - val_loss: 5.6962 - val_accuracy: 0.1500
Epoch 82/105
64/64 [==============================] - ETA: 0s - loss: 1.7561 - accuracy: 0.3779
Epoch 82: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 108ms/step - loss: 1.7561 - accuracy: 0.3779 - val_loss: 2.9087 - val_accuracy: 0.2125
Epoch 83/105
64/64 [==============================] - ETA: 0s - loss: 1.6631 - accuracy: 0.4307
Epoch 83: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 102ms/step - loss: 1.6631 - accuracy: 0.4307 - val_loss: 2.8781 - val_accuracy: 0.2625
Epoch 84/105
64/64 [==============================] - ETA: 0s - loss: 1.6681 - accuracy: 0.4131
Epoch 84: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 103ms/step - loss: 1.6681 - accuracy: 0.4131 - val_loss: 1.9893 - val_accuracy: 0.3250
Epoch 85/105
64/64 [==============================] - ETA: 0s - loss: 1.6827 - accuracy: 0.4033
Epoch 85: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 103ms/step - loss: 1.6827 - accuracy: 0.4033 - val_loss: 2.2343 - val_accuracy: 0.3438
Epoch 86/105
64/64 [==============================] - ETA: 0s - loss: 1.6781 - accuracy: 0.4141
Epoch 86: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 103ms/step - loss: 1.6781 - accuracy: 0.4141 - val_loss: 2.7768 - val_accuracy: 0.2375
Epoch 87/105
64/64 [==============================] - ETA: 0s - loss: 1.6345 - accuracy: 0.4316
Epoch 87: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 103ms/step - loss: 1.6345 - accuracy: 0.4316 - val_loss: 2.3754 - val_accuracy: 0.2188
Epoch 88/105
64/64 [==============================] - ETA: 0s - loss: 1.5694 - accuracy: 0.4434
Epoch 88: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 103ms/step - loss: 1.5694 - accuracy: 0.4434 - val_loss: 7.8269 - val_accuracy: 0.1562
Epoch 89/105
64/64 [==============================] - ETA: 0s - loss: 1.6148 - accuracy: 0.4307
Epoch 89: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 103ms/step - loss: 1.6148 - accuracy: 0.4307 - val_loss: 2.0808 - val_accuracy: 0.3187
Epoch 90/105
64/64 [==============================] - ETA: 0s - loss: 1.5805 - accuracy: 0.4395
Epoch 90: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 104ms/step - loss: 1.5805 - accuracy: 0.4395 - val_loss: 2.3865 - val_accuracy: 0.2062
Epoch 91/105
64/64 [==============================] - ETA: 0s - loss: 1.6039 - accuracy: 0.4453
Epoch 91: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 103ms/step - loss: 1.6039 - accuracy: 0.4453 - val_loss: 2.0231 - val_accuracy: 0.3438
Epoch 92/105
64/64 [==============================] - ETA: 0s - loss: 1.5993 - accuracy: 0.4502
Epoch 92: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 102ms/step - loss: 1.5993 - accuracy: 0.4502 - val_loss: 2.0590 - val_accuracy: 0.2875
Epoch 93/105
64/64 [==============================] - ETA: 0s - loss: 1.6277 - accuracy: 0.4355
Epoch 93: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 103ms/step - loss: 1.6277 - accuracy: 0.4355 - val_loss: 2.6306 - val_accuracy: 0.2812
Epoch 94/105
64/64 [==============================] - ETA: 0s - loss: 1.5953 - accuracy: 0.4307
Epoch 94: val_accuracy improved from 0.35000 to 0.37500, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_2.h5
64/64 [==============================] - 8s 122ms/step - loss: 1.5953 - accuracy: 0.4307 - val_loss: 1.8147 - val_accuracy: 0.3750
Epoch 95/105
64/64 [==============================] - ETA: 0s - loss: 1.5531 - accuracy: 0.4639
Epoch 95: val_accuracy did not improve from 0.37500
64/64 [==============================] - 7s 103ms/step - loss: 1.5531 - accuracy: 0.4639 - val_loss: 5.4752 - val_accuracy: 0.1562
Epoch 96/105
64/64 [==============================] - ETA: 0s - loss: 1.5612 - accuracy: 0.4375
Epoch 96: val_accuracy did not improve from 0.37500
64/64 [==============================] - 7s 103ms/step - loss: 1.5612 - accuracy: 0.4375 - val_loss: 4.5717 - val_accuracy: 0.2125
Epoch 97/105
64/64 [==============================] - ETA: 0s - loss: 1.5974 - accuracy: 0.4326
Epoch 97: val_accuracy improved from 0.37500 to 0.43750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_2.h5
64/64 [==============================] - 9s 139ms/step - loss: 1.5974 - accuracy: 0.4326 - val_loss: 1.5994 - val_accuracy: 0.4375
Epoch 98/105
64/64 [==============================] - ETA: 0s - loss: 1.5399 - accuracy: 0.4541
Epoch 98: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 104ms/step - loss: 1.5399 - accuracy: 0.4541 - val_loss: 1.9371 - val_accuracy: 0.3500
Epoch 99/105
64/64 [==============================] - ETA: 0s - loss: 1.5440 - accuracy: 0.4609
Epoch 99: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 103ms/step - loss: 1.5440 - accuracy: 0.4609 - val_loss: 1.9682 - val_accuracy: 0.3438
Epoch 100/105
64/64 [==============================] - ETA: 0s - loss: 1.4973 - accuracy: 0.4902
Epoch 100: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 102ms/step - loss: 1.4973 - accuracy: 0.4902 - val_loss: 2.1751 - val_accuracy: 0.3562
Epoch 101/105
64/64 [==============================] - ETA: 0s - loss: 1.5338 - accuracy: 0.4678
Epoch 101: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 103ms/step - loss: 1.5338 - accuracy: 0.4678 - val_loss: 2.1382 - val_accuracy: 0.3500
Epoch 102/105
64/64 [==============================] - ETA: 0s - loss: 1.5528 - accuracy: 0.4551
Epoch 102: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 103ms/step - loss: 1.5528 - accuracy: 0.4551 - val_loss: 4.1755 - val_accuracy: 0.1875
Epoch 103/105
64/64 [==============================] - ETA: 0s - loss: 1.4915 - accuracy: 0.5020
Epoch 103: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 104ms/step - loss: 1.4915 - accuracy: 0.5020 - val_loss: 2.2785 - val_accuracy: 0.3125
Epoch 104/105
64/64 [==============================] - ETA: 0s - loss: 1.4208 - accuracy: 0.5166
Epoch 104: val_accuracy did not improve from 0.43750
64/64 [==============================] - 7s 102ms/step - loss: 1.4208 - accuracy: 0.5166 - val_loss: 1.7211 - val_accuracy: 0.4250
Epoch 105/105
64/64 [==============================] - ETA: 0s - loss: 1.4726 - accuracy: 0.4873
Epoch 105: val_accuracy improved from 0.43750 to 0.48750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_2.h5
64/64 [==============================] - 8s 121ms/step - loss: 1.4726 - accuracy: 0.4873 - val_loss: 1.6797 - val_accuracy: 0.4875
********* Training time: 1402.40625 s.
*****************
* Model Summary *
*****************
Model: "resnet50"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_2 (InputLayer) [(None, 224, 224, 3 0 []
)]
conv1_pad (ZeroPadding2D) (None, 230, 230, 3) 0 ['input_2[0][0]']
conv1_conv (Conv2D) (None, 112, 112, 64 9472 ['conv1_pad[0][0]']
)
conv1_bn (BatchNormalization) (None, 112, 112, 64 256 ['conv1_conv[0][0]']
)
conv1_relu (Activation) (None, 112, 112, 64 0 ['conv1_bn[0][0]']
)
pool1_pad (ZeroPadding2D) (None, 114, 114, 64 0 ['conv1_relu[0][0]']
)
pool1_pool (MaxPooling2D) (None, 56, 56, 64) 0 ['pool1_pad[0][0]']
conv2_block1_1_conv (Conv2D) (None, 56, 56, 64) 4160 ['pool1_pool[0][0]']
conv2_block1_1_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block1_1_conv[0][0]']
ization)
conv2_block1_1_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block1_1_bn[0][0]']
n)
conv2_block1_2_conv (Conv2D) (None, 56, 56, 64) 36928 ['conv2_block1_1_relu[0][0]']
conv2_block1_2_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block1_2_conv[0][0]']
ization)
conv2_block1_2_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block1_2_bn[0][0]']
n)
conv2_block1_0_conv (Conv2D) (None, 56, 56, 256) 16640 ['pool1_pool[0][0]']
conv2_block1_3_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block1_2_relu[0][0]']
conv2_block1_0_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block1_0_conv[0][0]']
ization)
conv2_block1_3_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block1_3_conv[0][0]']
ization)
conv2_block1_add (Add) (None, 56, 56, 256) 0 ['conv2_block1_0_bn[0][0]',
'conv2_block1_3_bn[0][0]']
conv2_block1_out (Activation) (None, 56, 56, 256) 0 ['conv2_block1_add[0][0]']
conv2_block2_1_conv (Conv2D) (None, 56, 56, 64) 16448 ['conv2_block1_out[0][0]']
conv2_block2_1_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block2_1_conv[0][0]']
ization)
conv2_block2_1_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block2_1_bn[0][0]']
n)
conv2_block2_2_conv (Conv2D) (None, 56, 56, 64) 36928 ['conv2_block2_1_relu[0][0]']
conv2_block2_2_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block2_2_conv[0][0]']
ization)
conv2_block2_2_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block2_2_bn[0][0]']
n)
conv2_block2_3_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block2_2_relu[0][0]']
conv2_block2_3_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block2_3_conv[0][0]']
ization)
conv2_block2_add (Add) (None, 56, 56, 256) 0 ['conv2_block1_out[0][0]',
'conv2_block2_3_bn[0][0]']
conv2_block2_out (Activation) (None, 56, 56, 256) 0 ['conv2_block2_add[0][0]']
conv2_block3_1_conv (Conv2D) (None, 56, 56, 64) 16448 ['conv2_block2_out[0][0]']
conv2_block3_1_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block3_1_conv[0][0]']
ization)
conv2_block3_1_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block3_1_bn[0][0]']
n)
conv2_block3_2_conv (Conv2D) (None, 56, 56, 64) 36928 ['conv2_block3_1_relu[0][0]']
conv2_block3_2_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block3_2_conv[0][0]']
ization)
conv2_block3_2_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block3_2_bn[0][0]']
n)
conv2_block3_3_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block3_2_relu[0][0]']
conv2_block3_3_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block3_3_conv[0][0]']
ization)
conv2_block3_add (Add) (None, 56, 56, 256) 0 ['conv2_block2_out[0][0]',
'conv2_block3_3_bn[0][0]']
conv2_block3_out (Activation) (None, 56, 56, 256) 0 ['conv2_block3_add[0][0]']
conv3_block1_1_conv (Conv2D) (None, 28, 28, 128) 32896 ['conv2_block3_out[0][0]']
conv3_block1_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block1_1_conv[0][0]']
ization)
conv3_block1_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block1_1_bn[0][0]']
n)
conv3_block1_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block1_1_relu[0][0]']
conv3_block1_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block1_2_conv[0][0]']
ization)
conv3_block1_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block1_2_bn[0][0]']
n)
conv3_block1_0_conv (Conv2D) (None, 28, 28, 512) 131584 ['conv2_block3_out[0][0]']
conv3_block1_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block1_2_relu[0][0]']
conv3_block1_0_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block1_0_conv[0][0]']
ization)
conv3_block1_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block1_3_conv[0][0]']
ization)
conv3_block1_add (Add) (None, 28, 28, 512) 0 ['conv3_block1_0_bn[0][0]',
'conv3_block1_3_bn[0][0]']
conv3_block1_out (Activation) (None, 28, 28, 512) 0 ['conv3_block1_add[0][0]']
conv3_block2_1_conv (Conv2D) (None, 28, 28, 128) 65664 ['conv3_block1_out[0][0]']
conv3_block2_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block2_1_conv[0][0]']
ization)
conv3_block2_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block2_1_bn[0][0]']
n)
conv3_block2_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block2_1_relu[0][0]']
conv3_block2_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block2_2_conv[0][0]']
ization)
conv3_block2_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block2_2_bn[0][0]']
n)
conv3_block2_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block2_2_relu[0][0]']
conv3_block2_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block2_3_conv[0][0]']
ization)
conv3_block2_add (Add) (None, 28, 28, 512) 0 ['conv3_block1_out[0][0]',
'conv3_block2_3_bn[0][0]']
conv3_block2_out (Activation) (None, 28, 28, 512) 0 ['conv3_block2_add[0][0]']
conv3_block3_1_conv (Conv2D) (None, 28, 28, 128) 65664 ['conv3_block2_out[0][0]']
conv3_block3_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block3_1_conv[0][0]']
ization)
conv3_block3_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block3_1_bn[0][0]']
n)
conv3_block3_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block3_1_relu[0][0]']
conv3_block3_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block3_2_conv[0][0]']
ization)
conv3_block3_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block3_2_bn[0][0]']
n)
conv3_block3_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block3_2_relu[0][0]']
conv3_block3_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block3_3_conv[0][0]']
ization)
conv3_block3_add (Add) (None, 28, 28, 512) 0 ['conv3_block2_out[0][0]',
'conv3_block3_3_bn[0][0]']
conv3_block3_out (Activation) (None, 28, 28, 512) 0 ['conv3_block3_add[0][0]']
conv3_block4_1_conv (Conv2D) (None, 28, 28, 128) 65664 ['conv3_block3_out[0][0]']
conv3_block4_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block4_1_conv[0][0]']
ization)
conv3_block4_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block4_1_bn[0][0]']
n)
conv3_block4_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block4_1_relu[0][0]']
conv3_block4_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block4_2_conv[0][0]']
ization)
conv3_block4_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block4_2_bn[0][0]']
n)
conv3_block4_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block4_2_relu[0][0]']
conv3_block4_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block4_3_conv[0][0]']
ization)
conv3_block4_add (Add) (None, 28, 28, 512) 0 ['conv3_block3_out[0][0]',
'conv3_block4_3_bn[0][0]']
conv3_block4_out (Activation) (None, 28, 28, 512) 0 ['conv3_block4_add[0][0]']
conv4_block1_1_conv (Conv2D) (None, 14, 14, 256) 131328 ['conv3_block4_out[0][0]']
conv4_block1_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block1_1_conv[0][0]']
ization)
conv4_block1_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block1_1_bn[0][0]']
n)
conv4_block1_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block1_1_relu[0][0]']
conv4_block1_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block1_2_conv[0][0]']
ization)
conv4_block1_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block1_2_bn[0][0]']
n)
conv4_block1_0_conv (Conv2D) (None, 14, 14, 1024 525312 ['conv3_block4_out[0][0]']
)
conv4_block1_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block1_2_relu[0][0]']
)
conv4_block1_0_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block1_0_conv[0][0]']
ization) )
conv4_block1_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block1_3_conv[0][0]']
ization) )
conv4_block1_add (Add) (None, 14, 14, 1024 0 ['conv4_block1_0_bn[0][0]',
) 'conv4_block1_3_bn[0][0]']
conv4_block1_out (Activation) (None, 14, 14, 1024 0 ['conv4_block1_add[0][0]']
)
conv4_block2_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block1_out[0][0]']
conv4_block2_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block2_1_conv[0][0]']
ization)
conv4_block2_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block2_1_bn[0][0]']
n)
conv4_block2_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block2_1_relu[0][0]']
conv4_block2_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block2_2_conv[0][0]']
ization)
conv4_block2_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block2_2_bn[0][0]']
n)
conv4_block2_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block2_2_relu[0][0]']
)
conv4_block2_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block2_3_conv[0][0]']
ization) )
conv4_block2_add (Add) (None, 14, 14, 1024 0 ['conv4_block1_out[0][0]',
) 'conv4_block2_3_bn[0][0]']
conv4_block2_out (Activation) (None, 14, 14, 1024 0 ['conv4_block2_add[0][0]']
)
conv4_block3_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block2_out[0][0]']
conv4_block3_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block3_1_conv[0][0]']
ization)
conv4_block3_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block3_1_bn[0][0]']
n)
conv4_block3_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block3_1_relu[0][0]']
conv4_block3_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block3_2_conv[0][0]']
ization)
conv4_block3_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block3_2_bn[0][0]']
n)
conv4_block3_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block3_2_relu[0][0]']
)
conv4_block3_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block3_3_conv[0][0]']
ization) )
conv4_block3_add (Add) (None, 14, 14, 1024 0 ['conv4_block2_out[0][0]',
) 'conv4_block3_3_bn[0][0]']
conv4_block3_out (Activation) (None, 14, 14, 1024 0 ['conv4_block3_add[0][0]']
)
conv4_block4_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block3_out[0][0]']
conv4_block4_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block4_1_conv[0][0]']
ization)
conv4_block4_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block4_1_bn[0][0]']
n)
conv4_block4_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block4_1_relu[0][0]']
conv4_block4_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block4_2_conv[0][0]']
ization)
conv4_block4_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block4_2_bn[0][0]']
n)
conv4_block4_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block4_2_relu[0][0]']
)
conv4_block4_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block4_3_conv[0][0]']
ization) )
conv4_block4_add (Add) (None, 14, 14, 1024 0 ['conv4_block3_out[0][0]',
) 'conv4_block4_3_bn[0][0]']
conv4_block4_out (Activation) (None, 14, 14, 1024 0 ['conv4_block4_add[0][0]']
)
conv4_block5_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block4_out[0][0]']
conv4_block5_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block5_1_conv[0][0]']
ization)
conv4_block5_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block5_1_bn[0][0]']
n)
conv4_block5_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block5_1_relu[0][0]']
conv4_block5_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block5_2_conv[0][0]']
ization)
conv4_block5_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block5_2_bn[0][0]']
n)
conv4_block5_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block5_2_relu[0][0]']
)
conv4_block5_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block5_3_conv[0][0]']
ization) )
conv4_block5_add (Add) (None, 14, 14, 1024 0 ['conv4_block4_out[0][0]',
) 'conv4_block5_3_bn[0][0]']
conv4_block5_out (Activation) (None, 14, 14, 1024 0 ['conv4_block5_add[0][0]']
)
conv4_block6_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block5_out[0][0]']
conv4_block6_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block6_1_conv[0][0]']
ization)
conv4_block6_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block6_1_bn[0][0]']
n)
conv4_block6_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block6_1_relu[0][0]']
conv4_block6_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block6_2_conv[0][0]']
ization)
conv4_block6_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block6_2_bn[0][0]']
n)
conv4_block6_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block6_2_relu[0][0]']
)
conv4_block6_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block6_3_conv[0][0]']
ization) )
conv4_block6_add (Add) (None, 14, 14, 1024 0 ['conv4_block5_out[0][0]',
) 'conv4_block6_3_bn[0][0]']
conv4_block6_out (Activation) (None, 14, 14, 1024 0 ['conv4_block6_add[0][0]']
)
conv5_block1_1_conv (Conv2D) (None, 7, 7, 512) 524800 ['conv4_block6_out[0][0]']
conv5_block1_1_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block1_1_conv[0][0]']
ization)
conv5_block1_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block1_1_bn[0][0]']
n)
conv5_block1_2_conv (Conv2D) (None, 7, 7, 512) 2359808 ['conv5_block1_1_relu[0][0]']
conv5_block1_2_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block1_2_conv[0][0]']
ization)
conv5_block1_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block1_2_bn[0][0]']
n)
conv5_block1_0_conv (Conv2D) (None, 7, 7, 2048) 2099200 ['conv4_block6_out[0][0]']
conv5_block1_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block1_2_relu[0][0]']
conv5_block1_0_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block1_0_conv[0][0]']
ization)
conv5_block1_3_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block1_3_conv[0][0]']
ization)
conv5_block1_add (Add) (None, 7, 7, 2048) 0 ['conv5_block1_0_bn[0][0]',
'conv5_block1_3_bn[0][0]']
conv5_block1_out (Activation) (None, 7, 7, 2048) 0 ['conv5_block1_add[0][0]']
conv5_block2_1_conv (Conv2D) (None, 7, 7, 512) 1049088 ['conv5_block1_out[0][0]']
conv5_block2_1_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block2_1_conv[0][0]']
ization)
conv5_block2_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block2_1_bn[0][0]']
n)
conv5_block2_2_conv (Conv2D) (None, 7, 7, 512) 2359808 ['conv5_block2_1_relu[0][0]']
conv5_block2_2_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block2_2_conv[0][0]']
ization)
conv5_block2_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block2_2_bn[0][0]']
n)
conv5_block2_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block2_2_relu[0][0]']
conv5_block2_3_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block2_3_conv[0][0]']
ization)
conv5_block2_add (Add) (None, 7, 7, 2048) 0 ['conv5_block1_out[0][0]',
'conv5_block2_3_bn[0][0]']
conv5_block2_out (Activation) (None, 7, 7, 2048) 0 ['conv5_block2_add[0][0]']
conv5_block3_1_conv (Conv2D) (None, 7, 7, 512) 1049088 ['conv5_block2_out[0][0]']
conv5_block3_1_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block3_1_conv[0][0]']
ization)
conv5_block3_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block3_1_bn[0][0]']
n)
conv5_block3_2_conv (Conv2D) (None, 7, 7, 512) 2359808 ['conv5_block3_1_relu[0][0]']
conv5_block3_2_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block3_2_conv[0][0]']
ization)
conv5_block3_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block3_2_bn[0][0]']
n)
conv5_block3_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block3_2_relu[0][0]']
conv5_block3_3_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block3_3_conv[0][0]']
ization)
conv5_block3_add (Add) (None, 7, 7, 2048) 0 ['conv5_block2_out[0][0]',
'conv5_block3_3_bn[0][0]']
conv5_block3_out (Activation) (None, 7, 7, 2048) 0 ['conv5_block3_add[0][0]']
avg_pool (GlobalAveragePooling (None, 2048) 0 ['conv5_block3_out[0][0]']
2D)
predictions (Dense) (None, 12) 24588 ['avg_pool[0][0]']
==================================================================================================
Total params: 23,612,300
Trainable params: 23,559,180
Non-trainable params: 53,120
__________________________________________________________________________________________________
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 3600 validated image filenames belonging to 12 classes.
225/225 [==============================] - 7s 27ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.7705 0.6787 0.7217 277
anarrhichomenum 0.6864 0.5143 0.5880 315
brevantherum 0.4089 0.6580 0.5044 307
dulcamara 0.5161 0.3232 0.3975 297
herposolanum 0.3236 0.5799 0.4154 288
holophylla 0.5069 0.5140 0.5104 286
lasiocarpa 0.7052 0.9140 0.7961 314
melongena 0.8045 0.4948 0.6128 291
micracantha 0.3454 0.4247 0.3810 292
petota 0.4078 0.5122 0.4541 328
solanum 0.6809 0.1053 0.1823 304
torva 0.3087 0.2359 0.2674 301
accuracy 0.4967 3600
macro avg 0.5387 0.4962 0.4859 3600
weighted avg 0.5380 0.4967 0.4856 3600
********************
* Confusion Matrix *
********************
************************************** * Train/Val Accuracy and Loss graphs * **************************************
*****************************
* Started at 2871.140625... *
*****************************
Found 14400 validated image filenames belonging to 12 classes.
Found 3600 validated image filenames belonging to 12 classes.
Epoch 1/105
64/64 [==============================] - ETA: 0s - loss: 4.0296 - accuracy: 0.0947
Epoch 1: val_accuracy improved from -inf to 0.09375, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_3.h5
64/64 [==============================] - 11s 122ms/step - loss: 4.0296 - accuracy: 0.0947 - val_loss: 7.1974 - val_accuracy: 0.0938
Epoch 2/105
64/64 [==============================] - ETA: 0s - loss: 3.0052 - accuracy: 0.1094
Epoch 2: val_accuracy did not improve from 0.09375
64/64 [==============================] - 7s 104ms/step - loss: 3.0052 - accuracy: 0.1094 - val_loss: 3251.5769 - val_accuracy: 0.0812
Epoch 3/105
64/64 [==============================] - ETA: 0s - loss: 2.6046 - accuracy: 0.1270
Epoch 3: val_accuracy improved from 0.09375 to 0.16875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_3.h5
64/64 [==============================] - 8s 120ms/step - loss: 2.6046 - accuracy: 0.1270 - val_loss: 3.1643 - val_accuracy: 0.1688
Epoch 4/105
64/64 [==============================] - ETA: 0s - loss: 2.4387 - accuracy: 0.1455
Epoch 4: val_accuracy did not improve from 0.16875
64/64 [==============================] - 7s 104ms/step - loss: 2.4387 - accuracy: 0.1455 - val_loss: 3.5246 - val_accuracy: 0.0688
Epoch 5/105
64/64 [==============================] - ETA: 0s - loss: 2.4326 - accuracy: 0.1709
Epoch 5: val_accuracy did not improve from 0.16875
64/64 [==============================] - 7s 104ms/step - loss: 2.4326 - accuracy: 0.1709 - val_loss: 5.4712 - val_accuracy: 0.1312
Epoch 6/105
64/64 [==============================] - ETA: 0s - loss: 2.3068 - accuracy: 0.1885
Epoch 6: val_accuracy improved from 0.16875 to 0.20625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_3.h5
64/64 [==============================] - 8s 126ms/step - loss: 2.3068 - accuracy: 0.1885 - val_loss: 3.7744 - val_accuracy: 0.2062
Epoch 7/105
64/64 [==============================] - ETA: 0s - loss: 2.3320 - accuracy: 0.1924
Epoch 7: val_accuracy improved from 0.20625 to 0.22500, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_3.h5
64/64 [==============================] - 8s 128ms/step - loss: 2.3320 - accuracy: 0.1924 - val_loss: 8.3325 - val_accuracy: 0.2250
Epoch 8/105
64/64 [==============================] - ETA: 0s - loss: 2.3480 - accuracy: 0.1807
Epoch 8: val_accuracy did not improve from 0.22500
64/64 [==============================] - 7s 103ms/step - loss: 2.3480 - accuracy: 0.1807 - val_loss: 9.1093 - val_accuracy: 0.1187
Epoch 9/105
64/64 [==============================] - ETA: 0s - loss: 2.2883 - accuracy: 0.2100
Epoch 9: val_accuracy did not improve from 0.22500
64/64 [==============================] - 7s 104ms/step - loss: 2.2883 - accuracy: 0.2100 - val_loss: 2.4915 - val_accuracy: 0.2125
Epoch 10/105
64/64 [==============================] - ETA: 0s - loss: 2.2440 - accuracy: 0.1924
Epoch 10: val_accuracy did not improve from 0.22500
64/64 [==============================] - 7s 103ms/step - loss: 2.2440 - accuracy: 0.1924 - val_loss: 3.9342 - val_accuracy: 0.1937
Epoch 11/105
64/64 [==============================] - ETA: 0s - loss: 2.2775 - accuracy: 0.1826
Epoch 11: val_accuracy did not improve from 0.22500
64/64 [==============================] - 7s 104ms/step - loss: 2.2775 - accuracy: 0.1826 - val_loss: 4.1680 - val_accuracy: 0.1625
Epoch 12/105
64/64 [==============================] - ETA: 0s - loss: 2.2751 - accuracy: 0.2109
Epoch 12: val_accuracy did not improve from 0.22500
64/64 [==============================] - 7s 105ms/step - loss: 2.2751 - accuracy: 0.2109 - val_loss: 2.7190 - val_accuracy: 0.2125
Epoch 13/105
64/64 [==============================] - ETA: 0s - loss: 2.2426 - accuracy: 0.2227
Epoch 13: val_accuracy did not improve from 0.22500
64/64 [==============================] - 7s 104ms/step - loss: 2.2426 - accuracy: 0.2227 - val_loss: 11.9522 - val_accuracy: 0.1875
Epoch 14/105
64/64 [==============================] - ETA: 0s - loss: 2.2032 - accuracy: 0.2305
Epoch 14: val_accuracy did not improve from 0.22500
64/64 [==============================] - 7s 104ms/step - loss: 2.2032 - accuracy: 0.2305 - val_loss: 9.6028 - val_accuracy: 0.2125
Epoch 15/105
64/64 [==============================] - ETA: 0s - loss: 2.2771 - accuracy: 0.1836
Epoch 15: val_accuracy did not improve from 0.22500
64/64 [==============================] - 7s 104ms/step - loss: 2.2771 - accuracy: 0.1836 - val_loss: 3.8325 - val_accuracy: 0.1688
Epoch 16/105
64/64 [==============================] - ETA: 0s - loss: 2.2090 - accuracy: 0.2217
Epoch 16: val_accuracy did not improve from 0.22500
64/64 [==============================] - 7s 103ms/step - loss: 2.2090 - accuracy: 0.2217 - val_loss: 2.6072 - val_accuracy: 0.1875
Epoch 17/105
64/64 [==============================] - ETA: 0s - loss: 2.1642 - accuracy: 0.2119
Epoch 17: val_accuracy did not improve from 0.22500
64/64 [==============================] - 7s 104ms/step - loss: 2.1642 - accuracy: 0.2119 - val_loss: 4.5153 - val_accuracy: 0.2062
Epoch 18/105
64/64 [==============================] - ETA: 0s - loss: 2.1629 - accuracy: 0.2314
Epoch 18: val_accuracy did not improve from 0.22500
64/64 [==============================] - 7s 104ms/step - loss: 2.1629 - accuracy: 0.2314 - val_loss: 2.3892 - val_accuracy: 0.1937
Epoch 19/105
64/64 [==============================] - ETA: 0s - loss: 2.1991 - accuracy: 0.2217
Epoch 19: val_accuracy did not improve from 0.22500
64/64 [==============================] - 7s 103ms/step - loss: 2.1991 - accuracy: 0.2217 - val_loss: 3.9922 - val_accuracy: 0.1750
Epoch 20/105
64/64 [==============================] - ETA: 0s - loss: 2.1257 - accuracy: 0.2334
Epoch 20: val_accuracy did not improve from 0.22500
64/64 [==============================] - 7s 103ms/step - loss: 2.1257 - accuracy: 0.2334 - val_loss: 4.5742 - val_accuracy: 0.1875
Epoch 21/105
64/64 [==============================] - ETA: 0s - loss: 2.1139 - accuracy: 0.2539
Epoch 21: val_accuracy improved from 0.22500 to 0.23750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_3.h5
64/64 [==============================] - 8s 124ms/step - loss: 2.1139 - accuracy: 0.2539 - val_loss: 2.4622 - val_accuracy: 0.2375
Epoch 22/105
64/64 [==============================] - ETA: 0s - loss: 2.1390 - accuracy: 0.2344
Epoch 22: val_accuracy did not improve from 0.23750
64/64 [==============================] - 7s 104ms/step - loss: 2.1390 - accuracy: 0.2344 - val_loss: 2.6034 - val_accuracy: 0.1625
Epoch 23/105
64/64 [==============================] - ETA: 0s - loss: 2.1608 - accuracy: 0.2236
Epoch 23: val_accuracy improved from 0.23750 to 0.25000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_3.h5
64/64 [==============================] - 8s 123ms/step - loss: 2.1608 - accuracy: 0.2236 - val_loss: 7.1883 - val_accuracy: 0.2500
Epoch 24/105
64/64 [==============================] - ETA: 0s - loss: 2.1087 - accuracy: 0.2529
Epoch 24: val_accuracy did not improve from 0.25000
64/64 [==============================] - 7s 104ms/step - loss: 2.1087 - accuracy: 0.2529 - val_loss: 4.0482 - val_accuracy: 0.2438
Epoch 25/105
64/64 [==============================] - ETA: 0s - loss: 2.0726 - accuracy: 0.2480
Epoch 25: val_accuracy did not improve from 0.25000
64/64 [==============================] - 7s 104ms/step - loss: 2.0726 - accuracy: 0.2480 - val_loss: 6.9413 - val_accuracy: 0.1500
Epoch 26/105
64/64 [==============================] - ETA: 0s - loss: 2.0893 - accuracy: 0.2598
Epoch 26: val_accuracy did not improve from 0.25000
64/64 [==============================] - 7s 104ms/step - loss: 2.0893 - accuracy: 0.2598 - val_loss: 3.5752 - val_accuracy: 0.1312
Epoch 27/105
64/64 [==============================] - ETA: 0s - loss: 2.0684 - accuracy: 0.2559
Epoch 27: val_accuracy improved from 0.25000 to 0.28125, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_3.h5
64/64 [==============================] - 8s 127ms/step - loss: 2.0684 - accuracy: 0.2559 - val_loss: 1.9530 - val_accuracy: 0.2812
Epoch 28/105
64/64 [==============================] - ETA: 0s - loss: 2.0238 - accuracy: 0.2754
Epoch 28: val_accuracy did not improve from 0.28125
64/64 [==============================] - 7s 104ms/step - loss: 2.0238 - accuracy: 0.2754 - val_loss: 2.4389 - val_accuracy: 0.2562
Epoch 29/105
64/64 [==============================] - ETA: 0s - loss: 2.0457 - accuracy: 0.2646
Epoch 29: val_accuracy did not improve from 0.28125
64/64 [==============================] - 7s 103ms/step - loss: 2.0457 - accuracy: 0.2646 - val_loss: 5.8522 - val_accuracy: 0.1063
Epoch 30/105
64/64 [==============================] - ETA: 0s - loss: 2.0350 - accuracy: 0.2686
Epoch 30: val_accuracy improved from 0.28125 to 0.28750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_3.h5
64/64 [==============================] - 8s 119ms/step - loss: 2.0350 - accuracy: 0.2686 - val_loss: 2.0168 - val_accuracy: 0.2875
Epoch 31/105
64/64 [==============================] - ETA: 0s - loss: 1.9661 - accuracy: 0.2900
Epoch 31: val_accuracy did not improve from 0.28750
64/64 [==============================] - 7s 103ms/step - loss: 1.9661 - accuracy: 0.2900 - val_loss: 2.9514 - val_accuracy: 0.2625
Epoch 32/105
64/64 [==============================] - ETA: 0s - loss: 2.0070 - accuracy: 0.2715
Epoch 32: val_accuracy did not improve from 0.28750
64/64 [==============================] - 7s 103ms/step - loss: 2.0070 - accuracy: 0.2715 - val_loss: 6.3692 - val_accuracy: 0.2875
Epoch 33/105
64/64 [==============================] - ETA: 0s - loss: 1.9707 - accuracy: 0.2988
Epoch 33: val_accuracy did not improve from 0.28750
64/64 [==============================] - 7s 104ms/step - loss: 1.9707 - accuracy: 0.2988 - val_loss: 1.9048 - val_accuracy: 0.2562
Epoch 34/105
64/64 [==============================] - ETA: 0s - loss: 1.9882 - accuracy: 0.2861
Epoch 34: val_accuracy did not improve from 0.28750
64/64 [==============================] - 7s 104ms/step - loss: 1.9882 - accuracy: 0.2861 - val_loss: 1.9168 - val_accuracy: 0.2812
Epoch 35/105
64/64 [==============================] - ETA: 0s - loss: 1.9550 - accuracy: 0.3135
Epoch 35: val_accuracy did not improve from 0.28750
64/64 [==============================] - 7s 103ms/step - loss: 1.9550 - accuracy: 0.3135 - val_loss: 2.3261 - val_accuracy: 0.2438
Epoch 36/105
64/64 [==============================] - ETA: 0s - loss: 1.9450 - accuracy: 0.3271
Epoch 36: val_accuracy did not improve from 0.28750
64/64 [==============================] - 7s 104ms/step - loss: 1.9450 - accuracy: 0.3271 - val_loss: 2.1259 - val_accuracy: 0.2875
Epoch 37/105
64/64 [==============================] - ETA: 0s - loss: 1.9726 - accuracy: 0.3008
Epoch 37: val_accuracy did not improve from 0.28750
64/64 [==============================] - 7s 103ms/step - loss: 1.9726 - accuracy: 0.3008 - val_loss: 2.2057 - val_accuracy: 0.2313
Epoch 38/105
64/64 [==============================] - ETA: 0s - loss: 1.9732 - accuracy: 0.2822
Epoch 38: val_accuracy did not improve from 0.28750
64/64 [==============================] - 7s 103ms/step - loss: 1.9732 - accuracy: 0.2822 - val_loss: 12.3066 - val_accuracy: 0.1000
Epoch 39/105
64/64 [==============================] - ETA: 0s - loss: 1.9325 - accuracy: 0.3223
Epoch 39: val_accuracy did not improve from 0.28750
64/64 [==============================] - 7s 103ms/step - loss: 1.9325 - accuracy: 0.3223 - val_loss: 2.4612 - val_accuracy: 0.2062
Epoch 40/105
64/64 [==============================] - ETA: 0s - loss: 1.8898 - accuracy: 0.3311
Epoch 40: val_accuracy did not improve from 0.28750
64/64 [==============================] - 7s 103ms/step - loss: 1.8898 - accuracy: 0.3311 - val_loss: 2.4262 - val_accuracy: 0.2250
Epoch 41/105
64/64 [==============================] - ETA: 0s - loss: 1.8883 - accuracy: 0.3174
Epoch 41: val_accuracy did not improve from 0.28750
64/64 [==============================] - 7s 103ms/step - loss: 1.8883 - accuracy: 0.3174 - val_loss: 3.8126 - val_accuracy: 0.2188
Epoch 42/105
64/64 [==============================] - ETA: 0s - loss: 1.9383 - accuracy: 0.3018
Epoch 42: val_accuracy did not improve from 0.28750
64/64 [==============================] - 7s 104ms/step - loss: 1.9383 - accuracy: 0.3018 - val_loss: 2.9020 - val_accuracy: 0.1875
Epoch 43/105
64/64 [==============================] - ETA: 0s - loss: 1.9055 - accuracy: 0.3252
Epoch 43: val_accuracy did not improve from 0.28750
64/64 [==============================] - 7s 104ms/step - loss: 1.9055 - accuracy: 0.3252 - val_loss: 2.1219 - val_accuracy: 0.2812
Epoch 44/105
64/64 [==============================] - ETA: 0s - loss: 1.9478 - accuracy: 0.3047
Epoch 44: val_accuracy improved from 0.28750 to 0.30625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_3.h5
64/64 [==============================] - 8s 129ms/step - loss: 1.9478 - accuracy: 0.3047 - val_loss: 2.5076 - val_accuracy: 0.3063
Epoch 45/105
64/64 [==============================] - ETA: 0s - loss: 1.9177 - accuracy: 0.3242
Epoch 45: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 105ms/step - loss: 1.9177 - accuracy: 0.3242 - val_loss: 3.6204 - val_accuracy: 0.2688
Epoch 46/105
64/64 [==============================] - ETA: 0s - loss: 1.8720 - accuracy: 0.3223
Epoch 46: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 104ms/step - loss: 1.8720 - accuracy: 0.3223 - val_loss: 2.3644 - val_accuracy: 0.2750
Epoch 47/105
64/64 [==============================] - ETA: 0s - loss: 1.8959 - accuracy: 0.3301
Epoch 47: val_accuracy improved from 0.30625 to 0.31875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_3.h5
64/64 [==============================] - 8s 123ms/step - loss: 1.8959 - accuracy: 0.3301 - val_loss: 2.0881 - val_accuracy: 0.3187
Epoch 48/105
64/64 [==============================] - ETA: 0s - loss: 1.8298 - accuracy: 0.3379
Epoch 48: val_accuracy improved from 0.31875 to 0.34375, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_3.h5
64/64 [==============================] - 8s 123ms/step - loss: 1.8298 - accuracy: 0.3379 - val_loss: 2.1810 - val_accuracy: 0.3438
Epoch 49/105
64/64 [==============================] - ETA: 0s - loss: 1.8486 - accuracy: 0.3223
Epoch 49: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 104ms/step - loss: 1.8486 - accuracy: 0.3223 - val_loss: 2.0128 - val_accuracy: 0.3063
Epoch 50/105
64/64 [==============================] - ETA: 0s - loss: 1.8809 - accuracy: 0.3213
Epoch 50: val_accuracy improved from 0.34375 to 0.36250, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_3.h5
64/64 [==============================] - 8s 120ms/step - loss: 1.8809 - accuracy: 0.3213 - val_loss: 2.3987 - val_accuracy: 0.3625
Epoch 51/105
64/64 [==============================] - ETA: 0s - loss: 1.7984 - accuracy: 0.3574
Epoch 51: val_accuracy did not improve from 0.36250
64/64 [==============================] - 7s 105ms/step - loss: 1.7984 - accuracy: 0.3574 - val_loss: 2.6725 - val_accuracy: 0.2375
Epoch 52/105
64/64 [==============================] - ETA: 0s - loss: 1.8586 - accuracy: 0.3428
Epoch 52: val_accuracy did not improve from 0.36250
64/64 [==============================] - 7s 104ms/step - loss: 1.8586 - accuracy: 0.3428 - val_loss: 2.2984 - val_accuracy: 0.3063
Epoch 53/105
64/64 [==============================] - ETA: 0s - loss: 1.8844 - accuracy: 0.3340
Epoch 53: val_accuracy did not improve from 0.36250
64/64 [==============================] - 7s 104ms/step - loss: 1.8844 - accuracy: 0.3340 - val_loss: 2.4855 - val_accuracy: 0.2438
Epoch 54/105
64/64 [==============================] - ETA: 0s - loss: 1.8220 - accuracy: 0.3350
Epoch 54: val_accuracy did not improve from 0.36250
64/64 [==============================] - 7s 106ms/step - loss: 1.8220 - accuracy: 0.3350 - val_loss: 1.8485 - val_accuracy: 0.3562
Epoch 55/105
64/64 [==============================] - ETA: 0s - loss: 1.8297 - accuracy: 0.3379
Epoch 55: val_accuracy did not improve from 0.36250
64/64 [==============================] - 7s 109ms/step - loss: 1.8297 - accuracy: 0.3379 - val_loss: 2.0261 - val_accuracy: 0.3125
Epoch 56/105
64/64 [==============================] - ETA: 0s - loss: 1.8248 - accuracy: 0.3555
Epoch 56: val_accuracy did not improve from 0.36250
64/64 [==============================] - 7s 109ms/step - loss: 1.8248 - accuracy: 0.3555 - val_loss: 2.0675 - val_accuracy: 0.3375
Epoch 57/105
64/64 [==============================] - ETA: 0s - loss: 1.8080 - accuracy: 0.3574
Epoch 57: val_accuracy did not improve from 0.36250
64/64 [==============================] - 7s 109ms/step - loss: 1.8080 - accuracy: 0.3574 - val_loss: 3.1825 - val_accuracy: 0.2812
Epoch 58/105
64/64 [==============================] - ETA: 0s - loss: 1.8447 - accuracy: 0.3643
Epoch 58: val_accuracy improved from 0.36250 to 0.39375, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_3.h5
64/64 [==============================] - 9s 134ms/step - loss: 1.8447 - accuracy: 0.3643 - val_loss: 1.6772 - val_accuracy: 0.3938
Epoch 59/105
64/64 [==============================] - ETA: 0s - loss: 1.8085 - accuracy: 0.3652
Epoch 59: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 113ms/step - loss: 1.8085 - accuracy: 0.3652 - val_loss: 4.8381 - val_accuracy: 0.2250
Epoch 60/105
64/64 [==============================] - ETA: 0s - loss: 1.8360 - accuracy: 0.3389
Epoch 60: val_accuracy improved from 0.39375 to 0.41875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_3.h5
64/64 [==============================] - 8s 129ms/step - loss: 1.8360 - accuracy: 0.3389 - val_loss: 1.7219 - val_accuracy: 0.4187
Epoch 61/105
64/64 [==============================] - ETA: 0s - loss: 1.7985 - accuracy: 0.3457
Epoch 61: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 109ms/step - loss: 1.7985 - accuracy: 0.3457 - val_loss: 1.8486 - val_accuracy: 0.4062
Epoch 62/105
64/64 [==============================] - ETA: 0s - loss: 1.8206 - accuracy: 0.3428
Epoch 62: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 112ms/step - loss: 1.8206 - accuracy: 0.3428 - val_loss: 2.7591 - val_accuracy: 0.3000
Epoch 63/105
64/64 [==============================] - ETA: 0s - loss: 1.8268 - accuracy: 0.3564
Epoch 63: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 110ms/step - loss: 1.8268 - accuracy: 0.3564 - val_loss: 2.5044 - val_accuracy: 0.3250
Epoch 64/105
64/64 [==============================] - ETA: 0s - loss: 1.8223 - accuracy: 0.3398
Epoch 64: val_accuracy did not improve from 0.41875
64/64 [==============================] - 8s 117ms/step - loss: 1.8223 - accuracy: 0.3398 - val_loss: 2.3936 - val_accuracy: 0.3250
Epoch 65/105
64/64 [==============================] - ETA: 0s - loss: 1.7765 - accuracy: 0.3623
Epoch 65: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 111ms/step - loss: 1.7765 - accuracy: 0.3623 - val_loss: 2.2138 - val_accuracy: 0.2937
Epoch 66/105
64/64 [==============================] - ETA: 0s - loss: 1.7692 - accuracy: 0.3818
Epoch 66: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 111ms/step - loss: 1.7692 - accuracy: 0.3818 - val_loss: 1.8702 - val_accuracy: 0.3187
Epoch 67/105
64/64 [==============================] - ETA: 0s - loss: 1.8092 - accuracy: 0.3721
Epoch 67: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 111ms/step - loss: 1.8092 - accuracy: 0.3721 - val_loss: 2.1350 - val_accuracy: 0.3063
Epoch 68/105
64/64 [==============================] - ETA: 0s - loss: 1.7941 - accuracy: 0.3496
Epoch 68: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 109ms/step - loss: 1.7941 - accuracy: 0.3496 - val_loss: 1.9316 - val_accuracy: 0.2875
Epoch 69/105
64/64 [==============================] - ETA: 0s - loss: 1.7438 - accuracy: 0.3887
Epoch 69: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 109ms/step - loss: 1.7438 - accuracy: 0.3887 - val_loss: 1.7607 - val_accuracy: 0.3375
Epoch 70/105
64/64 [==============================] - ETA: 0s - loss: 1.7641 - accuracy: 0.3926
Epoch 70: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 115ms/step - loss: 1.7641 - accuracy: 0.3926 - val_loss: 1.7295 - val_accuracy: 0.4000
Epoch 71/105
64/64 [==============================] - ETA: 0s - loss: 1.6805 - accuracy: 0.4121
Epoch 71: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 109ms/step - loss: 1.6805 - accuracy: 0.4121 - val_loss: 3.7972 - val_accuracy: 0.2500
Epoch 72/105
64/64 [==============================] - ETA: 0s - loss: 1.7743 - accuracy: 0.3965
Epoch 72: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 109ms/step - loss: 1.7743 - accuracy: 0.3965 - val_loss: 1.9582 - val_accuracy: 0.2875
Epoch 73/105
64/64 [==============================] - ETA: 0s - loss: 1.7161 - accuracy: 0.4023
Epoch 73: val_accuracy did not improve from 0.41875
64/64 [==============================] - 8s 117ms/step - loss: 1.7161 - accuracy: 0.4023 - val_loss: 2.6636 - val_accuracy: 0.4125
Epoch 74/105
64/64 [==============================] - ETA: 0s - loss: 1.7326 - accuracy: 0.3857
Epoch 74: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 109ms/step - loss: 1.7326 - accuracy: 0.3857 - val_loss: 1.9660 - val_accuracy: 0.3625
Epoch 75/105
64/64 [==============================] - ETA: 0s - loss: 1.6958 - accuracy: 0.4102
Epoch 75: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 110ms/step - loss: 1.6958 - accuracy: 0.4102 - val_loss: 2.4611 - val_accuracy: 0.3875
Epoch 76/105
64/64 [==============================] - ETA: 0s - loss: 1.7189 - accuracy: 0.4062
Epoch 76: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 116ms/step - loss: 1.7189 - accuracy: 0.4062 - val_loss: 1.9891 - val_accuracy: 0.3688
Epoch 77/105
64/64 [==============================] - ETA: 0s - loss: 1.7217 - accuracy: 0.4023
Epoch 77: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 111ms/step - loss: 1.7217 - accuracy: 0.4023 - val_loss: 3.2608 - val_accuracy: 0.2250
Epoch 78/105
64/64 [==============================] - ETA: 0s - loss: 1.6913 - accuracy: 0.4277
Epoch 78: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 111ms/step - loss: 1.6913 - accuracy: 0.4277 - val_loss: 5.0849 - val_accuracy: 0.2062
Epoch 79/105
64/64 [==============================] - ETA: 0s - loss: 1.7674 - accuracy: 0.3711
Epoch 79: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 114ms/step - loss: 1.7674 - accuracy: 0.3711 - val_loss: 2.8523 - val_accuracy: 0.2500
Epoch 80/105
64/64 [==============================] - ETA: 0s - loss: 1.6924 - accuracy: 0.4062
Epoch 80: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 112ms/step - loss: 1.6924 - accuracy: 0.4062 - val_loss: 3.0158 - val_accuracy: 0.3063
Epoch 81/105
64/64 [==============================] - ETA: 0s - loss: 1.6796 - accuracy: 0.4141
Epoch 81: val_accuracy improved from 0.41875 to 0.45625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_3.h5
64/64 [==============================] - 8s 128ms/step - loss: 1.6796 - accuracy: 0.4141 - val_loss: 1.5853 - val_accuracy: 0.4563
Epoch 82/105
64/64 [==============================] - ETA: 0s - loss: 1.6614 - accuracy: 0.4111
Epoch 82: val_accuracy did not improve from 0.45625
64/64 [==============================] - 7s 109ms/step - loss: 1.6614 - accuracy: 0.4111 - val_loss: 4.5780 - val_accuracy: 0.3375
Epoch 83/105
64/64 [==============================] - ETA: 0s - loss: 1.6871 - accuracy: 0.4180
Epoch 83: val_accuracy did not improve from 0.45625
64/64 [==============================] - 7s 109ms/step - loss: 1.6871 - accuracy: 0.4180 - val_loss: 4.4917 - val_accuracy: 0.2188
Epoch 84/105
64/64 [==============================] - ETA: 0s - loss: 1.6745 - accuracy: 0.4043
Epoch 84: val_accuracy did not improve from 0.45625
64/64 [==============================] - 7s 108ms/step - loss: 1.6745 - accuracy: 0.4043 - val_loss: 2.8306 - val_accuracy: 0.3125
Epoch 85/105
64/64 [==============================] - ETA: 0s - loss: 1.6063 - accuracy: 0.4385
Epoch 85: val_accuracy did not improve from 0.45625
64/64 [==============================] - 7s 108ms/step - loss: 1.6063 - accuracy: 0.4385 - val_loss: 1.8743 - val_accuracy: 0.3688
Epoch 86/105
64/64 [==============================] - ETA: 0s - loss: 1.6923 - accuracy: 0.4346
Epoch 86: val_accuracy did not improve from 0.45625
64/64 [==============================] - 7s 109ms/step - loss: 1.6923 - accuracy: 0.4346 - val_loss: 2.2131 - val_accuracy: 0.3313
Epoch 87/105
64/64 [==============================] - ETA: 0s - loss: 1.7203 - accuracy: 0.3945
Epoch 87: val_accuracy improved from 0.45625 to 0.51250, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_3.h5
64/64 [==============================] - 8s 129ms/step - loss: 1.7203 - accuracy: 0.3945 - val_loss: 1.5148 - val_accuracy: 0.5125
Epoch 88/105
64/64 [==============================] - ETA: 0s - loss: 1.6009 - accuracy: 0.4385
Epoch 88: val_accuracy did not improve from 0.51250
64/64 [==============================] - 7s 108ms/step - loss: 1.6009 - accuracy: 0.4385 - val_loss: 2.0619 - val_accuracy: 0.3562
Epoch 89/105
64/64 [==============================] - ETA: 0s - loss: 1.6140 - accuracy: 0.4307
Epoch 89: val_accuracy did not improve from 0.51250
64/64 [==============================] - 7s 108ms/step - loss: 1.6140 - accuracy: 0.4307 - val_loss: 2.7040 - val_accuracy: 0.3000
Epoch 90/105
64/64 [==============================] - ETA: 0s - loss: 1.6442 - accuracy: 0.4258
Epoch 90: val_accuracy did not improve from 0.51250
64/64 [==============================] - 7s 109ms/step - loss: 1.6442 - accuracy: 0.4258 - val_loss: 5.8102 - val_accuracy: 0.2000
Epoch 91/105
64/64 [==============================] - ETA: 0s - loss: 1.5978 - accuracy: 0.4492
Epoch 91: val_accuracy did not improve from 0.51250
64/64 [==============================] - 7s 110ms/step - loss: 1.5978 - accuracy: 0.4492 - val_loss: 1.4858 - val_accuracy: 0.4563
Epoch 92/105
64/64 [==============================] - ETA: 0s - loss: 1.6437 - accuracy: 0.4404
Epoch 92: val_accuracy did not improve from 0.51250
64/64 [==============================] - 7s 108ms/step - loss: 1.6437 - accuracy: 0.4404 - val_loss: 2.0178 - val_accuracy: 0.3438
Epoch 93/105
64/64 [==============================] - ETA: 0s - loss: 1.5956 - accuracy: 0.4658
Epoch 93: val_accuracy did not improve from 0.51250
64/64 [==============================] - 7s 110ms/step - loss: 1.5956 - accuracy: 0.4658 - val_loss: 1.8730 - val_accuracy: 0.3875
Epoch 94/105
64/64 [==============================] - ETA: 0s - loss: 1.6105 - accuracy: 0.4346
Epoch 94: val_accuracy did not improve from 0.51250
64/64 [==============================] - 7s 114ms/step - loss: 1.6105 - accuracy: 0.4346 - val_loss: 2.2021 - val_accuracy: 0.4000
Epoch 95/105
64/64 [==============================] - ETA: 0s - loss: 1.6090 - accuracy: 0.4424
Epoch 95: val_accuracy did not improve from 0.51250
64/64 [==============================] - 7s 111ms/step - loss: 1.6090 - accuracy: 0.4424 - val_loss: 1.8643 - val_accuracy: 0.4000
Epoch 96/105
64/64 [==============================] - ETA: 0s - loss: 1.5793 - accuracy: 0.4219
Epoch 96: val_accuracy improved from 0.51250 to 0.53750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_3.h5
64/64 [==============================] - 8s 128ms/step - loss: 1.5793 - accuracy: 0.4219 - val_loss: 1.3562 - val_accuracy: 0.5375
Epoch 97/105
64/64 [==============================] - ETA: 0s - loss: 1.5527 - accuracy: 0.4668
Epoch 97: val_accuracy did not improve from 0.53750
64/64 [==============================] - 7s 108ms/step - loss: 1.5527 - accuracy: 0.4668 - val_loss: 7.9603 - val_accuracy: 0.1562
Epoch 98/105
64/64 [==============================] - ETA: 0s - loss: 1.5475 - accuracy: 0.4512
Epoch 98: val_accuracy did not improve from 0.53750
64/64 [==============================] - 7s 109ms/step - loss: 1.5475 - accuracy: 0.4512 - val_loss: 2.3336 - val_accuracy: 0.3375
Epoch 99/105
64/64 [==============================] - ETA: 0s - loss: 1.5688 - accuracy: 0.4502
Epoch 99: val_accuracy did not improve from 0.53750
64/64 [==============================] - 7s 109ms/step - loss: 1.5688 - accuracy: 0.4502 - val_loss: 2.7253 - val_accuracy: 0.3313
Epoch 100/105
64/64 [==============================] - ETA: 0s - loss: 1.4839 - accuracy: 0.4824
Epoch 100: val_accuracy did not improve from 0.53750
64/64 [==============================] - 7s 108ms/step - loss: 1.4839 - accuracy: 0.4824 - val_loss: 5.2123 - val_accuracy: 0.2125
Epoch 101/105
64/64 [==============================] - ETA: 0s - loss: 1.5696 - accuracy: 0.4463
Epoch 101: val_accuracy did not improve from 0.53750
64/64 [==============================] - 7s 108ms/step - loss: 1.5696 - accuracy: 0.4463 - val_loss: 1.7663 - val_accuracy: 0.3750
Epoch 102/105
64/64 [==============================] - ETA: 0s - loss: 1.4943 - accuracy: 0.4951
Epoch 102: val_accuracy did not improve from 0.53750
64/64 [==============================] - 7s 108ms/step - loss: 1.4943 - accuracy: 0.4951 - val_loss: 6.1871 - val_accuracy: 0.2000
Epoch 103/105
64/64 [==============================] - ETA: 0s - loss: 1.5635 - accuracy: 0.4590
Epoch 103: val_accuracy did not improve from 0.53750
64/64 [==============================] - 7s 108ms/step - loss: 1.5635 - accuracy: 0.4590 - val_loss: 1.9740 - val_accuracy: 0.4062
Epoch 104/105
64/64 [==============================] - ETA: 0s - loss: 1.4913 - accuracy: 0.5088
Epoch 104: val_accuracy did not improve from 0.53750
64/64 [==============================] - 7s 108ms/step - loss: 1.4913 - accuracy: 0.5088 - val_loss: 7.5735 - val_accuracy: 0.1688
Epoch 105/105
64/64 [==============================] - ETA: 0s - loss: 1.4838 - accuracy: 0.4707
Epoch 105: val_accuracy did not improve from 0.53750
64/64 [==============================] - 7s 108ms/step - loss: 1.4838 - accuracy: 0.4707 - val_loss: 8.0386 - val_accuracy: 0.1937
********* Training time: 1406.03125 s.
*****************
* Model Summary *
*****************
Model: "resnet50"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_3 (InputLayer) [(None, 224, 224, 3 0 []
)]
conv1_pad (ZeroPadding2D) (None, 230, 230, 3) 0 ['input_3[0][0]']
conv1_conv (Conv2D) (None, 112, 112, 64 9472 ['conv1_pad[0][0]']
)
conv1_bn (BatchNormalization) (None, 112, 112, 64 256 ['conv1_conv[0][0]']
)
conv1_relu (Activation) (None, 112, 112, 64 0 ['conv1_bn[0][0]']
)
pool1_pad (ZeroPadding2D) (None, 114, 114, 64 0 ['conv1_relu[0][0]']
)
pool1_pool (MaxPooling2D) (None, 56, 56, 64) 0 ['pool1_pad[0][0]']
conv2_block1_1_conv (Conv2D) (None, 56, 56, 64) 4160 ['pool1_pool[0][0]']
conv2_block1_1_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block1_1_conv[0][0]']
ization)
conv2_block1_1_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block1_1_bn[0][0]']
n)
conv2_block1_2_conv (Conv2D) (None, 56, 56, 64) 36928 ['conv2_block1_1_relu[0][0]']
conv2_block1_2_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block1_2_conv[0][0]']
ization)
conv2_block1_2_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block1_2_bn[0][0]']
n)
conv2_block1_0_conv (Conv2D) (None, 56, 56, 256) 16640 ['pool1_pool[0][0]']
conv2_block1_3_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block1_2_relu[0][0]']
conv2_block1_0_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block1_0_conv[0][0]']
ization)
conv2_block1_3_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block1_3_conv[0][0]']
ization)
conv2_block1_add (Add) (None, 56, 56, 256) 0 ['conv2_block1_0_bn[0][0]',
'conv2_block1_3_bn[0][0]']
conv2_block1_out (Activation) (None, 56, 56, 256) 0 ['conv2_block1_add[0][0]']
conv2_block2_1_conv (Conv2D) (None, 56, 56, 64) 16448 ['conv2_block1_out[0][0]']
conv2_block2_1_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block2_1_conv[0][0]']
ization)
conv2_block2_1_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block2_1_bn[0][0]']
n)
conv2_block2_2_conv (Conv2D) (None, 56, 56, 64) 36928 ['conv2_block2_1_relu[0][0]']
conv2_block2_2_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block2_2_conv[0][0]']
ization)
conv2_block2_2_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block2_2_bn[0][0]']
n)
conv2_block2_3_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block2_2_relu[0][0]']
conv2_block2_3_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block2_3_conv[0][0]']
ization)
conv2_block2_add (Add) (None, 56, 56, 256) 0 ['conv2_block1_out[0][0]',
'conv2_block2_3_bn[0][0]']
conv2_block2_out (Activation) (None, 56, 56, 256) 0 ['conv2_block2_add[0][0]']
conv2_block3_1_conv (Conv2D) (None, 56, 56, 64) 16448 ['conv2_block2_out[0][0]']
conv2_block3_1_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block3_1_conv[0][0]']
ization)
conv2_block3_1_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block3_1_bn[0][0]']
n)
conv2_block3_2_conv (Conv2D) (None, 56, 56, 64) 36928 ['conv2_block3_1_relu[0][0]']
conv2_block3_2_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block3_2_conv[0][0]']
ization)
conv2_block3_2_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block3_2_bn[0][0]']
n)
conv2_block3_3_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block3_2_relu[0][0]']
conv2_block3_3_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block3_3_conv[0][0]']
ization)
conv2_block3_add (Add) (None, 56, 56, 256) 0 ['conv2_block2_out[0][0]',
'conv2_block3_3_bn[0][0]']
conv2_block3_out (Activation) (None, 56, 56, 256) 0 ['conv2_block3_add[0][0]']
conv3_block1_1_conv (Conv2D) (None, 28, 28, 128) 32896 ['conv2_block3_out[0][0]']
conv3_block1_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block1_1_conv[0][0]']
ization)
conv3_block1_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block1_1_bn[0][0]']
n)
conv3_block1_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block1_1_relu[0][0]']
conv3_block1_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block1_2_conv[0][0]']
ization)
conv3_block1_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block1_2_bn[0][0]']
n)
conv3_block1_0_conv (Conv2D) (None, 28, 28, 512) 131584 ['conv2_block3_out[0][0]']
conv3_block1_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block1_2_relu[0][0]']
conv3_block1_0_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block1_0_conv[0][0]']
ization)
conv3_block1_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block1_3_conv[0][0]']
ization)
conv3_block1_add (Add) (None, 28, 28, 512) 0 ['conv3_block1_0_bn[0][0]',
'conv3_block1_3_bn[0][0]']
conv3_block1_out (Activation) (None, 28, 28, 512) 0 ['conv3_block1_add[0][0]']
conv3_block2_1_conv (Conv2D) (None, 28, 28, 128) 65664 ['conv3_block1_out[0][0]']
conv3_block2_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block2_1_conv[0][0]']
ization)
conv3_block2_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block2_1_bn[0][0]']
n)
conv3_block2_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block2_1_relu[0][0]']
conv3_block2_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block2_2_conv[0][0]']
ization)
conv3_block2_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block2_2_bn[0][0]']
n)
conv3_block2_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block2_2_relu[0][0]']
conv3_block2_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block2_3_conv[0][0]']
ization)
conv3_block2_add (Add) (None, 28, 28, 512) 0 ['conv3_block1_out[0][0]',
'conv3_block2_3_bn[0][0]']
conv3_block2_out (Activation) (None, 28, 28, 512) 0 ['conv3_block2_add[0][0]']
conv3_block3_1_conv (Conv2D) (None, 28, 28, 128) 65664 ['conv3_block2_out[0][0]']
conv3_block3_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block3_1_conv[0][0]']
ization)
conv3_block3_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block3_1_bn[0][0]']
n)
conv3_block3_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block3_1_relu[0][0]']
conv3_block3_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block3_2_conv[0][0]']
ization)
conv3_block3_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block3_2_bn[0][0]']
n)
conv3_block3_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block3_2_relu[0][0]']
conv3_block3_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block3_3_conv[0][0]']
ization)
conv3_block3_add (Add) (None, 28, 28, 512) 0 ['conv3_block2_out[0][0]',
'conv3_block3_3_bn[0][0]']
conv3_block3_out (Activation) (None, 28, 28, 512) 0 ['conv3_block3_add[0][0]']
conv3_block4_1_conv (Conv2D) (None, 28, 28, 128) 65664 ['conv3_block3_out[0][0]']
conv3_block4_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block4_1_conv[0][0]']
ization)
conv3_block4_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block4_1_bn[0][0]']
n)
conv3_block4_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block4_1_relu[0][0]']
conv3_block4_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block4_2_conv[0][0]']
ization)
conv3_block4_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block4_2_bn[0][0]']
n)
conv3_block4_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block4_2_relu[0][0]']
conv3_block4_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block4_3_conv[0][0]']
ization)
conv3_block4_add (Add) (None, 28, 28, 512) 0 ['conv3_block3_out[0][0]',
'conv3_block4_3_bn[0][0]']
conv3_block4_out (Activation) (None, 28, 28, 512) 0 ['conv3_block4_add[0][0]']
conv4_block1_1_conv (Conv2D) (None, 14, 14, 256) 131328 ['conv3_block4_out[0][0]']
conv4_block1_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block1_1_conv[0][0]']
ization)
conv4_block1_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block1_1_bn[0][0]']
n)
conv4_block1_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block1_1_relu[0][0]']
conv4_block1_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block1_2_conv[0][0]']
ization)
conv4_block1_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block1_2_bn[0][0]']
n)
conv4_block1_0_conv (Conv2D) (None, 14, 14, 1024 525312 ['conv3_block4_out[0][0]']
)
conv4_block1_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block1_2_relu[0][0]']
)
conv4_block1_0_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block1_0_conv[0][0]']
ization) )
conv4_block1_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block1_3_conv[0][0]']
ization) )
conv4_block1_add (Add) (None, 14, 14, 1024 0 ['conv4_block1_0_bn[0][0]',
) 'conv4_block1_3_bn[0][0]']
conv4_block1_out (Activation) (None, 14, 14, 1024 0 ['conv4_block1_add[0][0]']
)
conv4_block2_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block1_out[0][0]']
conv4_block2_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block2_1_conv[0][0]']
ization)
conv4_block2_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block2_1_bn[0][0]']
n)
conv4_block2_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block2_1_relu[0][0]']
conv4_block2_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block2_2_conv[0][0]']
ization)
conv4_block2_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block2_2_bn[0][0]']
n)
conv4_block2_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block2_2_relu[0][0]']
)
conv4_block2_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block2_3_conv[0][0]']
ization) )
conv4_block2_add (Add) (None, 14, 14, 1024 0 ['conv4_block1_out[0][0]',
) 'conv4_block2_3_bn[0][0]']
conv4_block2_out (Activation) (None, 14, 14, 1024 0 ['conv4_block2_add[0][0]']
)
conv4_block3_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block2_out[0][0]']
conv4_block3_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block3_1_conv[0][0]']
ization)
conv4_block3_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block3_1_bn[0][0]']
n)
conv4_block3_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block3_1_relu[0][0]']
conv4_block3_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block3_2_conv[0][0]']
ization)
conv4_block3_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block3_2_bn[0][0]']
n)
conv4_block3_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block3_2_relu[0][0]']
)
conv4_block3_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block3_3_conv[0][0]']
ization) )
conv4_block3_add (Add) (None, 14, 14, 1024 0 ['conv4_block2_out[0][0]',
) 'conv4_block3_3_bn[0][0]']
conv4_block3_out (Activation) (None, 14, 14, 1024 0 ['conv4_block3_add[0][0]']
)
conv4_block4_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block3_out[0][0]']
conv4_block4_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block4_1_conv[0][0]']
ization)
conv4_block4_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block4_1_bn[0][0]']
n)
conv4_block4_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block4_1_relu[0][0]']
conv4_block4_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block4_2_conv[0][0]']
ization)
conv4_block4_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block4_2_bn[0][0]']
n)
conv4_block4_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block4_2_relu[0][0]']
)
conv4_block4_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block4_3_conv[0][0]']
ization) )
conv4_block4_add (Add) (None, 14, 14, 1024 0 ['conv4_block3_out[0][0]',
) 'conv4_block4_3_bn[0][0]']
conv4_block4_out (Activation) (None, 14, 14, 1024 0 ['conv4_block4_add[0][0]']
)
conv4_block5_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block4_out[0][0]']
conv4_block5_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block5_1_conv[0][0]']
ization)
conv4_block5_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block5_1_bn[0][0]']
n)
conv4_block5_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block5_1_relu[0][0]']
conv4_block5_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block5_2_conv[0][0]']
ization)
conv4_block5_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block5_2_bn[0][0]']
n)
conv4_block5_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block5_2_relu[0][0]']
)
conv4_block5_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block5_3_conv[0][0]']
ization) )
conv4_block5_add (Add) (None, 14, 14, 1024 0 ['conv4_block4_out[0][0]',
) 'conv4_block5_3_bn[0][0]']
conv4_block5_out (Activation) (None, 14, 14, 1024 0 ['conv4_block5_add[0][0]']
)
conv4_block6_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block5_out[0][0]']
conv4_block6_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block6_1_conv[0][0]']
ization)
conv4_block6_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block6_1_bn[0][0]']
n)
conv4_block6_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block6_1_relu[0][0]']
conv4_block6_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block6_2_conv[0][0]']
ization)
conv4_block6_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block6_2_bn[0][0]']
n)
conv4_block6_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block6_2_relu[0][0]']
)
conv4_block6_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block6_3_conv[0][0]']
ization) )
conv4_block6_add (Add) (None, 14, 14, 1024 0 ['conv4_block5_out[0][0]',
) 'conv4_block6_3_bn[0][0]']
conv4_block6_out (Activation) (None, 14, 14, 1024 0 ['conv4_block6_add[0][0]']
)
conv5_block1_1_conv (Conv2D) (None, 7, 7, 512) 524800 ['conv4_block6_out[0][0]']
conv5_block1_1_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block1_1_conv[0][0]']
ization)
conv5_block1_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block1_1_bn[0][0]']
n)
conv5_block1_2_conv (Conv2D) (None, 7, 7, 512) 2359808 ['conv5_block1_1_relu[0][0]']
conv5_block1_2_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block1_2_conv[0][0]']
ization)
conv5_block1_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block1_2_bn[0][0]']
n)
conv5_block1_0_conv (Conv2D) (None, 7, 7, 2048) 2099200 ['conv4_block6_out[0][0]']
conv5_block1_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block1_2_relu[0][0]']
conv5_block1_0_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block1_0_conv[0][0]']
ization)
conv5_block1_3_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block1_3_conv[0][0]']
ization)
conv5_block1_add (Add) (None, 7, 7, 2048) 0 ['conv5_block1_0_bn[0][0]',
'conv5_block1_3_bn[0][0]']
conv5_block1_out (Activation) (None, 7, 7, 2048) 0 ['conv5_block1_add[0][0]']
conv5_block2_1_conv (Conv2D) (None, 7, 7, 512) 1049088 ['conv5_block1_out[0][0]']
conv5_block2_1_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block2_1_conv[0][0]']
ization)
conv5_block2_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block2_1_bn[0][0]']
n)
conv5_block2_2_conv (Conv2D) (None, 7, 7, 512) 2359808 ['conv5_block2_1_relu[0][0]']
conv5_block2_2_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block2_2_conv[0][0]']
ization)
conv5_block2_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block2_2_bn[0][0]']
n)
conv5_block2_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block2_2_relu[0][0]']
conv5_block2_3_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block2_3_conv[0][0]']
ization)
conv5_block2_add (Add) (None, 7, 7, 2048) 0 ['conv5_block1_out[0][0]',
'conv5_block2_3_bn[0][0]']
conv5_block2_out (Activation) (None, 7, 7, 2048) 0 ['conv5_block2_add[0][0]']
conv5_block3_1_conv (Conv2D) (None, 7, 7, 512) 1049088 ['conv5_block2_out[0][0]']
conv5_block3_1_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block3_1_conv[0][0]']
ization)
conv5_block3_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block3_1_bn[0][0]']
n)
conv5_block3_2_conv (Conv2D) (None, 7, 7, 512) 2359808 ['conv5_block3_1_relu[0][0]']
conv5_block3_2_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block3_2_conv[0][0]']
ization)
conv5_block3_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block3_2_bn[0][0]']
n)
conv5_block3_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block3_2_relu[0][0]']
conv5_block3_3_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block3_3_conv[0][0]']
ization)
conv5_block3_add (Add) (None, 7, 7, 2048) 0 ['conv5_block2_out[0][0]',
'conv5_block3_3_bn[0][0]']
conv5_block3_out (Activation) (None, 7, 7, 2048) 0 ['conv5_block3_add[0][0]']
avg_pool (GlobalAveragePooling (None, 2048) 0 ['conv5_block3_out[0][0]']
2D)
predictions (Dense) (None, 12) 24588 ['avg_pool[0][0]']
==================================================================================================
Total params: 23,612,300
Trainable params: 23,559,180
Non-trainable params: 53,120
__________________________________________________________________________________________________
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 3600 validated image filenames belonging to 12 classes.
225/225 [==============================] - 7s 27ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.3663 0.3964 0.3808 280
anarrhichomenum 0.1085 1.0000 0.1958 319
brevantherum 0.0000 0.0000 0.0000 267
dulcamara 0.2900 0.0921 0.1398 315
herposolanum 0.5946 0.0733 0.1306 300
holophylla 0.4286 0.0537 0.0955 335
lasiocarpa 0.9153 0.1748 0.2935 309
melongena 0.5000 0.0035 0.0070 285
micracantha 0.2131 0.0442 0.0732 294
petota 0.1143 0.0133 0.0238 301
solanum 0.2500 0.0033 0.0066 299
torva 0.4118 0.0236 0.0447 296
accuracy 0.1608 3600
macro avg 0.3494 0.1565 0.1159 3600
weighted avg 0.3526 0.1608 0.1169 3600
********************
* Confusion Matrix *
********************
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. 
Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))
************************************** * Train/Val Accuracy and Loss graphs * **************************************
****************************
* Started at 4290.03125... *
****************************
Found 14400 validated image filenames belonging to 12 classes.
Found 3600 validated image filenames belonging to 12 classes.
Epoch 1/105
64/64 [==============================] - ETA: 0s - loss: 3.9890 - accuracy: 0.0918
Epoch 1: val_accuracy improved from -inf to 0.08750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_4.h5
64/64 [==============================] - 12s 125ms/step - loss: 3.9890 - accuracy: 0.0918 - val_loss: 56.2198 - val_accuracy: 0.0875
Epoch 2/105
64/64 [==============================] - ETA: 0s - loss: 2.8317 - accuracy: 0.1094
Epoch 2: val_accuracy did not improve from 0.08750
64/64 [==============================] - 7s 108ms/step - loss: 2.8317 - accuracy: 0.1094 - val_loss: 1518.9069 - val_accuracy: 0.0750
Epoch 3/105
64/64 [==============================] - ETA: 0s - loss: 2.6752 - accuracy: 0.1299
Epoch 3: val_accuracy improved from 0.08750 to 0.10625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_4.h5
64/64 [==============================] - 8s 123ms/step - loss: 2.6752 - accuracy: 0.1299 - val_loss: 11.1196 - val_accuracy: 0.1063
Epoch 4/105
64/64 [==============================] - ETA: 0s - loss: 2.5130 - accuracy: 0.1328
Epoch 4: val_accuracy did not improve from 0.10625
64/64 [==============================] - 7s 109ms/step - loss: 2.5130 - accuracy: 0.1328 - val_loss: 213.6284 - val_accuracy: 0.0812
Epoch 5/105
64/64 [==============================] - ETA: 0s - loss: 2.5858 - accuracy: 0.1230
Epoch 5: val_accuracy did not improve from 0.10625
64/64 [==============================] - 7s 109ms/step - loss: 2.5858 - accuracy: 0.1230 - val_loss: 42.7997 - val_accuracy: 0.1000
Epoch 6/105
64/64 [==============================] - ETA: 0s - loss: 2.4902 - accuracy: 0.1260
Epoch 6: val_accuracy did not improve from 0.10625
64/64 [==============================] - 7s 108ms/step - loss: 2.4902 - accuracy: 0.1260 - val_loss: 2.9617 - val_accuracy: 0.1000
Epoch 7/105
64/64 [==============================] - ETA: 0s - loss: 2.4348 - accuracy: 0.1582
Epoch 7: val_accuracy improved from 0.10625 to 0.11875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_4.h5
64/64 [==============================] - 8s 131ms/step - loss: 2.4348 - accuracy: 0.1582 - val_loss: 25.0416 - val_accuracy: 0.1187
Epoch 8/105
64/64 [==============================] - ETA: 0s - loss: 2.4074 - accuracy: 0.1406
Epoch 8: val_accuracy did not improve from 0.11875
64/64 [==============================] - 7s 108ms/step - loss: 2.4074 - accuracy: 0.1406 - val_loss: 5.2432 - val_accuracy: 0.1063
Epoch 9/105
64/64 [==============================] - ETA: 0s - loss: 2.3631 - accuracy: 0.1680
Epoch 9: val_accuracy did not improve from 0.11875
64/64 [==============================] - 7s 108ms/step - loss: 2.3631 - accuracy: 0.1680 - val_loss: 26.4269 - val_accuracy: 0.0437
Epoch 10/105
64/64 [==============================] - ETA: 0s - loss: 2.2893 - accuracy: 0.1855
Epoch 10: val_accuracy did not improve from 0.11875
64/64 [==============================] - 7s 108ms/step - loss: 2.2893 - accuracy: 0.1855 - val_loss: 14.9559 - val_accuracy: 0.0562
Epoch 11/105
64/64 [==============================] - ETA: 0s - loss: 2.2328 - accuracy: 0.1807
Epoch 11: val_accuracy improved from 0.11875 to 0.13125, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_4.h5
64/64 [==============================] - 8s 129ms/step - loss: 2.2328 - accuracy: 0.1807 - val_loss: 3.4483 - val_accuracy: 0.1312
Epoch 12/105
64/64 [==============================] - ETA: 0s - loss: 2.2464 - accuracy: 0.1816
Epoch 12: val_accuracy improved from 0.13125 to 0.17500, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_4.h5
64/64 [==============================] - 9s 133ms/step - loss: 2.2464 - accuracy: 0.1816 - val_loss: 5.0422 - val_accuracy: 0.1750
Epoch 13/105
64/64 [==============================] - ETA: 0s - loss: 2.2326 - accuracy: 0.1963
Epoch 13: val_accuracy did not improve from 0.17500
64/64 [==============================] - 7s 109ms/step - loss: 2.2326 - accuracy: 0.1963 - val_loss: 20.9180 - val_accuracy: 0.1375
Epoch 14/105
64/64 [==============================] - ETA: 0s - loss: 2.1849 - accuracy: 0.2012
Epoch 14: val_accuracy did not improve from 0.17500
64/64 [==============================] - 7s 109ms/step - loss: 2.1849 - accuracy: 0.2012 - val_loss: 3.1571 - val_accuracy: 0.1437
Epoch 15/105
64/64 [==============================] - ETA: 0s - loss: 2.2083 - accuracy: 0.2041
Epoch 15: val_accuracy did not improve from 0.17500
64/64 [==============================] - 7s 108ms/step - loss: 2.2083 - accuracy: 0.2041 - val_loss: 4.4903 - val_accuracy: 0.1750
Epoch 16/105
64/64 [==============================] - ETA: 0s - loss: 2.1849 - accuracy: 0.2109
Epoch 16: val_accuracy improved from 0.17500 to 0.18750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_4.h5
64/64 [==============================] - 9s 136ms/step - loss: 2.1849 - accuracy: 0.2109 - val_loss: 2.4158 - val_accuracy: 0.1875
Epoch 17/105
64/64 [==============================] - ETA: 0s - loss: 2.1788 - accuracy: 0.2070
Epoch 17: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 108ms/step - loss: 2.1788 - accuracy: 0.2070 - val_loss: 2.6501 - val_accuracy: 0.1813
Epoch 18/105
64/64 [==============================] - ETA: 0s - loss: 2.1325 - accuracy: 0.2197
Epoch 18: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 108ms/step - loss: 2.1325 - accuracy: 0.2197 - val_loss: 2.4996 - val_accuracy: 0.1625
Epoch 19/105
64/64 [==============================] - ETA: 0s - loss: 2.1183 - accuracy: 0.2188
Epoch 19: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 108ms/step - loss: 2.1183 - accuracy: 0.2188 - val_loss: 4.5839 - val_accuracy: 0.0812
Epoch 20/105
64/64 [==============================] - ETA: 0s - loss: 2.1559 - accuracy: 0.2207
Epoch 20: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 109ms/step - loss: 2.1559 - accuracy: 0.2207 - val_loss: 2.2319 - val_accuracy: 0.1875
Epoch 21/105
64/64 [==============================] - ETA: 0s - loss: 2.0922 - accuracy: 0.2109
Epoch 21: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 115ms/step - loss: 2.0922 - accuracy: 0.2109 - val_loss: 2.7259 - val_accuracy: 0.1000
Epoch 22/105
64/64 [==============================] - ETA: 0s - loss: 2.1204 - accuracy: 0.2021
Epoch 22: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 110ms/step - loss: 2.1204 - accuracy: 0.2021 - val_loss: 2.2841 - val_accuracy: 0.1813
Epoch 23/105
64/64 [==============================] - ETA: 0s - loss: 2.0776 - accuracy: 0.2461
Epoch 23: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 109ms/step - loss: 2.0776 - accuracy: 0.2461 - val_loss: 4.1128 - val_accuracy: 0.1375
Epoch 24/105
64/64 [==============================] - ETA: 0s - loss: 2.1027 - accuracy: 0.2314
Epoch 24: val_accuracy improved from 0.18750 to 0.20625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_4.h5
64/64 [==============================] - 9s 136ms/step - loss: 2.1027 - accuracy: 0.2314 - val_loss: 2.9314 - val_accuracy: 0.2062
Epoch 25/105
64/64 [==============================] - ETA: 0s - loss: 2.0084 - accuracy: 0.2773
Epoch 25: val_accuracy did not improve from 0.20625
64/64 [==============================] - 7s 108ms/step - loss: 2.0084 - accuracy: 0.2773 - val_loss: 3.2564 - val_accuracy: 0.1875
Epoch 26/105
64/64 [==============================] - ETA: 0s - loss: 2.0123 - accuracy: 0.2725
Epoch 26: val_accuracy did not improve from 0.20625
64/64 [==============================] - 7s 108ms/step - loss: 2.0123 - accuracy: 0.2725 - val_loss: 4.4920 - val_accuracy: 0.1813
Epoch 27/105
64/64 [==============================] - ETA: 0s - loss: 2.0375 - accuracy: 0.2666
Epoch 27: val_accuracy did not improve from 0.20625
64/64 [==============================] - 7s 110ms/step - loss: 2.0375 - accuracy: 0.2666 - val_loss: 6.9921 - val_accuracy: 0.1250
Epoch 28/105
64/64 [==============================] - ETA: 0s - loss: 2.0064 - accuracy: 0.2568
Epoch 28: val_accuracy improved from 0.20625 to 0.25625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_4.h5
64/64 [==============================] - 8s 125ms/step - loss: 2.0064 - accuracy: 0.2568 - val_loss: 2.3767 - val_accuracy: 0.2562
Epoch 29/105
64/64 [==============================] - ETA: 0s - loss: 1.9949 - accuracy: 0.2686
Epoch 29: val_accuracy did not improve from 0.25625
64/64 [==============================] - 7s 108ms/step - loss: 1.9949 - accuracy: 0.2686 - val_loss: 4.1448 - val_accuracy: 0.1688
Epoch 30/105
64/64 [==============================] - ETA: 0s - loss: 2.0360 - accuracy: 0.2568
Epoch 30: val_accuracy did not improve from 0.25625
64/64 [==============================] - 7s 108ms/step - loss: 2.0360 - accuracy: 0.2568 - val_loss: 4.0018 - val_accuracy: 0.1750
Epoch 31/105
64/64 [==============================] - ETA: 0s - loss: 1.9382 - accuracy: 0.3145
Epoch 31: val_accuracy did not improve from 0.25625
64/64 [==============================] - 7s 109ms/step - loss: 1.9382 - accuracy: 0.3145 - val_loss: 2.1866 - val_accuracy: 0.2375
Epoch 32/105
64/64 [==============================] - ETA: 0s - loss: 2.0084 - accuracy: 0.2666
Epoch 32: val_accuracy did not improve from 0.25625
64/64 [==============================] - 7s 109ms/step - loss: 2.0084 - accuracy: 0.2666 - val_loss: 2.1756 - val_accuracy: 0.2313
Epoch 33/105
64/64 [==============================] - ETA: 0s - loss: 1.9630 - accuracy: 0.3047
Epoch 33: val_accuracy did not improve from 0.25625
64/64 [==============================] - 7s 115ms/step - loss: 1.9630 - accuracy: 0.3047 - val_loss: 2.9268 - val_accuracy: 0.2125
Epoch 34/105
64/64 [==============================] - ETA: 0s - loss: 2.0253 - accuracy: 0.2500
Epoch 34: val_accuracy did not improve from 0.25625
64/64 [==============================] - 7s 112ms/step - loss: 2.0253 - accuracy: 0.2500 - val_loss: 3.5004 - val_accuracy: 0.1750
Epoch 35/105
64/64 [==============================] - ETA: 0s - loss: 1.9954 - accuracy: 0.2725
Epoch 35: val_accuracy did not improve from 0.25625
64/64 [==============================] - 7s 109ms/step - loss: 1.9954 - accuracy: 0.2725 - val_loss: 4.1170 - val_accuracy: 0.1937
Epoch 36/105
64/64 [==============================] - ETA: 0s - loss: 1.9349 - accuracy: 0.3379
Epoch 36: val_accuracy did not improve from 0.25625
64/64 [==============================] - 7s 110ms/step - loss: 1.9349 - accuracy: 0.3379 - val_loss: 3.4796 - val_accuracy: 0.1937
Epoch 37/105
64/64 [==============================] - ETA: 0s - loss: 2.0085 - accuracy: 0.2773
Epoch 37: val_accuracy did not improve from 0.25625
64/64 [==============================] - 7s 108ms/step - loss: 2.0085 - accuracy: 0.2773 - val_loss: 3.4351 - val_accuracy: 0.2375
Epoch 38/105
64/64 [==============================] - ETA: 0s - loss: 1.9349 - accuracy: 0.2979
Epoch 38: val_accuracy did not improve from 0.25625
64/64 [==============================] - 7s 108ms/step - loss: 1.9349 - accuracy: 0.2979 - val_loss: 2.8473 - val_accuracy: 0.2125
Epoch 39/105
64/64 [==============================] - ETA: 0s - loss: 1.9304 - accuracy: 0.3057
Epoch 39: val_accuracy did not improve from 0.25625
64/64 [==============================] - 7s 108ms/step - loss: 1.9304 - accuracy: 0.3057 - val_loss: 4.4854 - val_accuracy: 0.1813
Epoch 40/105
64/64 [==============================] - ETA: 0s - loss: 1.9390 - accuracy: 0.2979
Epoch 40: val_accuracy improved from 0.25625 to 0.35000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_4.h5
64/64 [==============================] - 8s 128ms/step - loss: 1.9390 - accuracy: 0.2979 - val_loss: 1.9167 - val_accuracy: 0.3500
Epoch 41/105
64/64 [==============================] - ETA: 0s - loss: 1.9106 - accuracy: 0.3076
Epoch 41: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 108ms/step - loss: 1.9106 - accuracy: 0.3076 - val_loss: 2.2609 - val_accuracy: 0.2250
Epoch 42/105
64/64 [==============================] - ETA: 0s - loss: 1.9191 - accuracy: 0.3174
Epoch 42: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 107ms/step - loss: 1.9191 - accuracy: 0.3174 - val_loss: 2.8451 - val_accuracy: 0.2062
Epoch 43/105
64/64 [==============================] - ETA: 0s - loss: 1.9054 - accuracy: 0.2998
Epoch 43: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 108ms/step - loss: 1.9054 - accuracy: 0.2998 - val_loss: 4.2945 - val_accuracy: 0.1875
Epoch 44/105
64/64 [==============================] - ETA: 0s - loss: 1.8857 - accuracy: 0.3125
Epoch 44: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 108ms/step - loss: 1.8857 - accuracy: 0.3125 - val_loss: 1.8480 - val_accuracy: 0.3438
Epoch 45/105
64/64 [==============================] - ETA: 0s - loss: 1.8791 - accuracy: 0.3066
Epoch 45: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 108ms/step - loss: 1.8791 - accuracy: 0.3066 - val_loss: 2.2119 - val_accuracy: 0.2875
Epoch 46/105
64/64 [==============================] - ETA: 0s - loss: 1.9720 - accuracy: 0.2930
Epoch 46: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 107ms/step - loss: 1.9720 - accuracy: 0.2930 - val_loss: 5.0282 - val_accuracy: 0.2188
Epoch 47/105
64/64 [==============================] - ETA: 0s - loss: 1.9373 - accuracy: 0.3057
Epoch 47: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 107ms/step - loss: 1.9373 - accuracy: 0.3057 - val_loss: 3.5290 - val_accuracy: 0.2125
Epoch 48/105
64/64 [==============================] - ETA: 0s - loss: 1.8992 - accuracy: 0.3232
Epoch 48: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 107ms/step - loss: 1.8992 - accuracy: 0.3232 - val_loss: 3.0855 - val_accuracy: 0.2438
Epoch 49/105
64/64 [==============================] - ETA: 0s - loss: 1.9142 - accuracy: 0.3184
Epoch 49: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 108ms/step - loss: 1.9142 - accuracy: 0.3184 - val_loss: 2.0345 - val_accuracy: 0.3000
Epoch 50/105
64/64 [==============================] - ETA: 0s - loss: 1.9081 - accuracy: 0.3320
Epoch 50: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 109ms/step - loss: 1.9081 - accuracy: 0.3320 - val_loss: 2.3816 - val_accuracy: 0.2625
Epoch 51/105
64/64 [==============================] - ETA: 0s - loss: 1.8490 - accuracy: 0.3359
Epoch 51: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 108ms/step - loss: 1.8490 - accuracy: 0.3359 - val_loss: 2.5539 - val_accuracy: 0.3000
Epoch 52/105
64/64 [==============================] - ETA: 0s - loss: 1.8251 - accuracy: 0.3447
Epoch 52: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 110ms/step - loss: 1.8251 - accuracy: 0.3447 - val_loss: 2.0784 - val_accuracy: 0.3438
Epoch 53/105
64/64 [==============================] - ETA: 0s - loss: 1.8323 - accuracy: 0.3535
Epoch 53: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 114ms/step - loss: 1.8323 - accuracy: 0.3535 - val_loss: 3.0686 - val_accuracy: 0.2375
Epoch 54/105
64/64 [==============================] - ETA: 0s - loss: 1.8940 - accuracy: 0.3359
Epoch 54: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 110ms/step - loss: 1.8940 - accuracy: 0.3359 - val_loss: 2.0675 - val_accuracy: 0.2937
Epoch 55/105
64/64 [==============================] - ETA: 0s - loss: 1.8594 - accuracy: 0.3516
Epoch 55: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 112ms/step - loss: 1.8594 - accuracy: 0.3516 - val_loss: 3.6510 - val_accuracy: 0.2125
Epoch 56/105
64/64 [==============================] - ETA: 0s - loss: 1.9154 - accuracy: 0.3086
Epoch 56: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 114ms/step - loss: 1.9154 - accuracy: 0.3086 - val_loss: 1.8295 - val_accuracy: 0.3500
Epoch 57/105
64/64 [==============================] - ETA: 0s - loss: 1.8954 - accuracy: 0.3232
Epoch 57: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 109ms/step - loss: 1.8954 - accuracy: 0.3232 - val_loss: 2.3479 - val_accuracy: 0.2000
Epoch 58/105
64/64 [==============================] - ETA: 0s - loss: 1.7845 - accuracy: 0.3506
Epoch 58: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 109ms/step - loss: 1.7845 - accuracy: 0.3506 - val_loss: 2.1938 - val_accuracy: 0.2750
Epoch 59/105
64/64 [==============================] - ETA: 0s - loss: 1.8423 - accuracy: 0.3359
Epoch 59: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 115ms/step - loss: 1.8423 - accuracy: 0.3359 - val_loss: 1.9672 - val_accuracy: 0.2438
Epoch 60/105
64/64 [==============================] - ETA: 0s - loss: 1.8221 - accuracy: 0.3525
Epoch 60: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 115ms/step - loss: 1.8221 - accuracy: 0.3525 - val_loss: 2.4610 - val_accuracy: 0.2125
Epoch 61/105
64/64 [==============================] - ETA: 0s - loss: 1.7808 - accuracy: 0.3730
Epoch 61: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 113ms/step - loss: 1.7808 - accuracy: 0.3730 - val_loss: 2.1835 - val_accuracy: 0.2937
Epoch 62/105
64/64 [==============================] - ETA: 0s - loss: 1.8701 - accuracy: 0.3467
Epoch 62: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 112ms/step - loss: 1.8701 - accuracy: 0.3467 - val_loss: 2.3839 - val_accuracy: 0.3125
Epoch 63/105
64/64 [==============================] - ETA: 0s - loss: 1.8164 - accuracy: 0.3516
Epoch 63: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 114ms/step - loss: 1.8164 - accuracy: 0.3516 - val_loss: 4.6246 - val_accuracy: 0.1625
Epoch 64/105
64/64 [==============================] - ETA: 0s - loss: 1.7910 - accuracy: 0.3701
Epoch 64: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 109ms/step - loss: 1.7910 - accuracy: 0.3701 - val_loss: 2.0328 - val_accuracy: 0.3000
Epoch 65/105
64/64 [==============================] - ETA: 0s - loss: 1.7716 - accuracy: 0.3770
Epoch 65: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 108ms/step - loss: 1.7716 - accuracy: 0.3770 - val_loss: 3.1629 - val_accuracy: 0.2125
Epoch 66/105
64/64 [==============================] - ETA: 0s - loss: 1.8761 - accuracy: 0.3359
Epoch 66: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 110ms/step - loss: 1.8761 - accuracy: 0.3359 - val_loss: 2.4284 - val_accuracy: 0.2937
Epoch 67/105
64/64 [==============================] - ETA: 0s - loss: 1.8640 - accuracy: 0.3389
Epoch 67: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 111ms/step - loss: 1.8640 - accuracy: 0.3389 - val_loss: 2.4586 - val_accuracy: 0.2562
Epoch 68/105
64/64 [==============================] - ETA: 0s - loss: 1.7944 - accuracy: 0.3564
Epoch 68: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 113ms/step - loss: 1.7944 - accuracy: 0.3564 - val_loss: 2.0443 - val_accuracy: 0.3438
Epoch 69/105
64/64 [==============================] - ETA: 0s - loss: 1.8779 - accuracy: 0.3398
Epoch 69: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 111ms/step - loss: 1.8779 - accuracy: 0.3398 - val_loss: 8.6222 - val_accuracy: 0.1312
Epoch 70/105
64/64 [==============================] - ETA: 0s - loss: 1.7559 - accuracy: 0.3799
Epoch 70: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 108ms/step - loss: 1.7559 - accuracy: 0.3799 - val_loss: 4.6273 - val_accuracy: 0.2000
Epoch 71/105
64/64 [==============================] - ETA: 0s - loss: 1.7605 - accuracy: 0.3945
Epoch 71: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 109ms/step - loss: 1.7605 - accuracy: 0.3945 - val_loss: 2.1680 - val_accuracy: 0.3500
Epoch 72/105
64/64 [==============================] - ETA: 0s - loss: 1.8384 - accuracy: 0.3311
Epoch 72: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 112ms/step - loss: 1.8384 - accuracy: 0.3311 - val_loss: 1.8988 - val_accuracy: 0.3125
Epoch 73/105
64/64 [==============================] - ETA: 0s - loss: 1.7392 - accuracy: 0.3887
Epoch 73: val_accuracy improved from 0.35000 to 0.39375, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_4.h5
64/64 [==============================] - 8s 127ms/step - loss: 1.7392 - accuracy: 0.3887 - val_loss: 1.7783 - val_accuracy: 0.3938
Epoch 74/105
64/64 [==============================] - ETA: 0s - loss: 1.8174 - accuracy: 0.3643
Epoch 74: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 110ms/step - loss: 1.8174 - accuracy: 0.3643 - val_loss: 2.1298 - val_accuracy: 0.2812
Epoch 75/105
64/64 [==============================] - ETA: 0s - loss: 1.7179 - accuracy: 0.4102
Epoch 75: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 115ms/step - loss: 1.7179 - accuracy: 0.4102 - val_loss: 2.5623 - val_accuracy: 0.2812
Epoch 76/105
64/64 [==============================] - ETA: 0s - loss: 1.7200 - accuracy: 0.4014
Epoch 76: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 113ms/step - loss: 1.7200 - accuracy: 0.4014 - val_loss: 1.9940 - val_accuracy: 0.3313
Epoch 77/105
64/64 [==============================] - ETA: 0s - loss: 1.7650 - accuracy: 0.3691
Epoch 77: val_accuracy did not improve from 0.39375
64/64 [==============================] - 8s 117ms/step - loss: 1.7650 - accuracy: 0.3691 - val_loss: 1.8903 - val_accuracy: 0.3187
Epoch 78/105
64/64 [==============================] - ETA: 0s - loss: 1.7151 - accuracy: 0.3857
Epoch 78: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 113ms/step - loss: 1.7151 - accuracy: 0.3857 - val_loss: 2.1607 - val_accuracy: 0.2625
Epoch 79/105
64/64 [==============================] - ETA: 0s - loss: 1.7118 - accuracy: 0.3896
Epoch 79: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 113ms/step - loss: 1.7118 - accuracy: 0.3896 - val_loss: 2.5579 - val_accuracy: 0.3125
Epoch 80/105
64/64 [==============================] - ETA: 0s - loss: 1.7500 - accuracy: 0.3662
Epoch 80: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 112ms/step - loss: 1.7500 - accuracy: 0.3662 - val_loss: 1.8617 - val_accuracy: 0.3438
Epoch 81/105
64/64 [==============================] - ETA: 0s - loss: 1.7402 - accuracy: 0.3838
Epoch 81: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 115ms/step - loss: 1.7402 - accuracy: 0.3838 - val_loss: 1.9147 - val_accuracy: 0.3063
Epoch 82/105
64/64 [==============================] - ETA: 0s - loss: 1.7417 - accuracy: 0.3633
Epoch 82: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 110ms/step - loss: 1.7417 - accuracy: 0.3633 - val_loss: 2.1663 - val_accuracy: 0.3812
Epoch 83/105
64/64 [==============================] - ETA: 0s - loss: 1.7048 - accuracy: 0.3945
Epoch 83: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 108ms/step - loss: 1.7048 - accuracy: 0.3945 - val_loss: 1.9891 - val_accuracy: 0.3313
Epoch 84/105
64/64 [==============================] - ETA: 0s - loss: 1.7710 - accuracy: 0.3916
Epoch 84: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 109ms/step - loss: 1.7710 - accuracy: 0.3916 - val_loss: 2.0776 - val_accuracy: 0.3063
Epoch 85/105
64/64 [==============================] - ETA: 0s - loss: 1.7036 - accuracy: 0.4033
Epoch 85: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 109ms/step - loss: 1.7036 - accuracy: 0.4033 - val_loss: 2.3543 - val_accuracy: 0.2562
Epoch 86/105
64/64 [==============================] - ETA: 0s - loss: 1.6645 - accuracy: 0.4033
Epoch 86: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 109ms/step - loss: 1.6645 - accuracy: 0.4033 - val_loss: 3.6196 - val_accuracy: 0.2250
Epoch 87/105
64/64 [==============================] - ETA: 0s - loss: 1.6858 - accuracy: 0.4131
Epoch 87: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 109ms/step - loss: 1.6858 - accuracy: 0.4131 - val_loss: 2.5743 - val_accuracy: 0.2937
Epoch 88/105
64/64 [==============================] - ETA: 0s - loss: 1.6449 - accuracy: 0.4346
Epoch 88: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 110ms/step - loss: 1.6449 - accuracy: 0.4346 - val_loss: 2.9760 - val_accuracy: 0.2375
Epoch 89/105
64/64 [==============================] - ETA: 0s - loss: 1.7367 - accuracy: 0.3916
Epoch 89: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 110ms/step - loss: 1.7367 - accuracy: 0.3916 - val_loss: 2.2156 - val_accuracy: 0.3438
Epoch 90/105
64/64 [==============================] - ETA: 0s - loss: 1.6443 - accuracy: 0.4326
Epoch 90: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 110ms/step - loss: 1.6443 - accuracy: 0.4326 - val_loss: 1.9783 - val_accuracy: 0.3125
Epoch 91/105
64/64 [==============================] - ETA: 0s - loss: 1.6505 - accuracy: 0.4092
Epoch 91: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 112ms/step - loss: 1.6505 - accuracy: 0.4092 - val_loss: 1.8269 - val_accuracy: 0.3688
Epoch 92/105
64/64 [==============================] - ETA: 0s - loss: 1.6804 - accuracy: 0.4082
Epoch 92: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 111ms/step - loss: 1.6804 - accuracy: 0.4082 - val_loss: 1.8600 - val_accuracy: 0.3375
Epoch 93/105
64/64 [==============================] - ETA: 0s - loss: 1.6439 - accuracy: 0.4297
Epoch 93: val_accuracy did not improve from 0.39375
64/64 [==============================] - 7s 115ms/step - loss: 1.6439 - accuracy: 0.4297 - val_loss: 2.2290 - val_accuracy: 0.2562
Epoch 94/105
64/64 [==============================] - ETA: 0s - loss: 1.6275 - accuracy: 0.4365
Epoch 94: val_accuracy improved from 0.39375 to 0.41875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_20230216150341_4.h5
64/64 [==============================] - 9s 140ms/step - loss: 1.6275 - accuracy: 0.4365 - val_loss: 1.7236 - val_accuracy: 0.4187
Epoch 95/105
64/64 [==============================] - ETA: 0s - loss: 1.6243 - accuracy: 0.4277
Epoch 95: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 109ms/step - loss: 1.6243 - accuracy: 0.4277 - val_loss: 1.6285 - val_accuracy: 0.4187
Epoch 96/105
64/64 [==============================] - ETA: 0s - loss: 1.6698 - accuracy: 0.4043
Epoch 96: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 109ms/step - loss: 1.6698 - accuracy: 0.4043 - val_loss: 2.5748 - val_accuracy: 0.3250
Epoch 97/105
64/64 [==============================] - ETA: 0s - loss: 1.6297 - accuracy: 0.4346
Epoch 97: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 109ms/step - loss: 1.6297 - accuracy: 0.4346 - val_loss: 2.7248 - val_accuracy: 0.2875
Epoch 98/105
64/64 [==============================] - ETA: 0s - loss: 1.6378 - accuracy: 0.4336
Epoch 98: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 109ms/step - loss: 1.6378 - accuracy: 0.4336 - val_loss: 2.3096 - val_accuracy: 0.2937
Epoch 99/105
64/64 [==============================] - ETA: 0s - loss: 1.5916 - accuracy: 0.4385
Epoch 99: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 108ms/step - loss: 1.5916 - accuracy: 0.4385 - val_loss: 2.4441 - val_accuracy: 0.2750
Epoch 100/105
64/64 [==============================] - ETA: 0s - loss: 1.6416 - accuracy: 0.4395
Epoch 100: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 108ms/step - loss: 1.6416 - accuracy: 0.4395 - val_loss: 2.0823 - val_accuracy: 0.3250
Epoch 101/105
64/64 [==============================] - ETA: 0s - loss: 1.6305 - accuracy: 0.4199
Epoch 101: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 109ms/step - loss: 1.6305 - accuracy: 0.4199 - val_loss: 2.3706 - val_accuracy: 0.2812
Epoch 102/105
64/64 [==============================] - ETA: 0s - loss: 1.6028 - accuracy: 0.4512
Epoch 102: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 108ms/step - loss: 1.6028 - accuracy: 0.4512 - val_loss: 2.0280 - val_accuracy: 0.3750
Epoch 103/105
64/64 [==============================] - ETA: 0s - loss: 1.6113 - accuracy: 0.4395
Epoch 103: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 109ms/step - loss: 1.6113 - accuracy: 0.4395 - val_loss: 1.7363 - val_accuracy: 0.3688
Epoch 104/105
64/64 [==============================] - ETA: 0s - loss: 1.5499 - accuracy: 0.4580
Epoch 104: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 108ms/step - loss: 1.5499 - accuracy: 0.4580 - val_loss: 2.1209 - val_accuracy: 0.3375
Epoch 105/105
64/64 [==============================] - ETA: 0s - loss: 1.5882 - accuracy: 0.4580
Epoch 105: val_accuracy did not improve from 0.41875
64/64 [==============================] - 7s 109ms/step - loss: 1.5882 - accuracy: 0.4580 - val_loss: 1.9409 - val_accuracy: 0.4187
********* Training time: 1407.90625 s.
*****************
* Model Summary *
*****************
Model: "resnet50"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_4 (InputLayer) [(None, 224, 224, 3 0 []
)]
conv1_pad (ZeroPadding2D) (None, 230, 230, 3) 0 ['input_4[0][0]']
conv1_conv (Conv2D) (None, 112, 112, 64 9472 ['conv1_pad[0][0]']
)
conv1_bn (BatchNormalization) (None, 112, 112, 64 256 ['conv1_conv[0][0]']
)
conv1_relu (Activation) (None, 112, 112, 64 0 ['conv1_bn[0][0]']
)
pool1_pad (ZeroPadding2D) (None, 114, 114, 64 0 ['conv1_relu[0][0]']
)
pool1_pool (MaxPooling2D) (None, 56, 56, 64) 0 ['pool1_pad[0][0]']
conv2_block1_1_conv (Conv2D) (None, 56, 56, 64) 4160 ['pool1_pool[0][0]']
conv2_block1_1_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block1_1_conv[0][0]']
ization)
conv2_block1_1_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block1_1_bn[0][0]']
n)
conv2_block1_2_conv (Conv2D) (None, 56, 56, 64) 36928 ['conv2_block1_1_relu[0][0]']
conv2_block1_2_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block1_2_conv[0][0]']
ization)
conv2_block1_2_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block1_2_bn[0][0]']
n)
conv2_block1_0_conv (Conv2D) (None, 56, 56, 256) 16640 ['pool1_pool[0][0]']
conv2_block1_3_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block1_2_relu[0][0]']
conv2_block1_0_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block1_0_conv[0][0]']
ization)
conv2_block1_3_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block1_3_conv[0][0]']
ization)
conv2_block1_add (Add) (None, 56, 56, 256) 0 ['conv2_block1_0_bn[0][0]',
'conv2_block1_3_bn[0][0]']
conv2_block1_out (Activation) (None, 56, 56, 256) 0 ['conv2_block1_add[0][0]']
conv2_block2_1_conv (Conv2D) (None, 56, 56, 64) 16448 ['conv2_block1_out[0][0]']
conv2_block2_1_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block2_1_conv[0][0]']
ization)
conv2_block2_1_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block2_1_bn[0][0]']
n)
conv2_block2_2_conv (Conv2D) (None, 56, 56, 64) 36928 ['conv2_block2_1_relu[0][0]']
conv2_block2_2_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block2_2_conv[0][0]']
ization)
conv2_block2_2_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block2_2_bn[0][0]']
n)
conv2_block2_3_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block2_2_relu[0][0]']
conv2_block2_3_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block2_3_conv[0][0]']
ization)
conv2_block2_add (Add) (None, 56, 56, 256) 0 ['conv2_block1_out[0][0]',
'conv2_block2_3_bn[0][0]']
conv2_block2_out (Activation) (None, 56, 56, 256) 0 ['conv2_block2_add[0][0]']
conv2_block3_1_conv (Conv2D) (None, 56, 56, 64) 16448 ['conv2_block2_out[0][0]']
conv2_block3_1_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block3_1_conv[0][0]']
ization)
conv2_block3_1_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block3_1_bn[0][0]']
n)
conv2_block3_2_conv (Conv2D) (None, 56, 56, 64) 36928 ['conv2_block3_1_relu[0][0]']
conv2_block3_2_bn (BatchNormal (None, 56, 56, 64) 256 ['conv2_block3_2_conv[0][0]']
ization)
conv2_block3_2_relu (Activatio (None, 56, 56, 64) 0 ['conv2_block3_2_bn[0][0]']
n)
conv2_block3_3_conv (Conv2D) (None, 56, 56, 256) 16640 ['conv2_block3_2_relu[0][0]']
conv2_block3_3_bn (BatchNormal (None, 56, 56, 256) 1024 ['conv2_block3_3_conv[0][0]']
ization)
conv2_block3_add (Add) (None, 56, 56, 256) 0 ['conv2_block2_out[0][0]',
'conv2_block3_3_bn[0][0]']
conv2_block3_out (Activation) (None, 56, 56, 256) 0 ['conv2_block3_add[0][0]']
conv3_block1_1_conv (Conv2D) (None, 28, 28, 128) 32896 ['conv2_block3_out[0][0]']
conv3_block1_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block1_1_conv[0][0]']
ization)
conv3_block1_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block1_1_bn[0][0]']
n)
conv3_block1_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block1_1_relu[0][0]']
conv3_block1_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block1_2_conv[0][0]']
ization)
conv3_block1_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block1_2_bn[0][0]']
n)
conv3_block1_0_conv (Conv2D) (None, 28, 28, 512) 131584 ['conv2_block3_out[0][0]']
conv3_block1_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block1_2_relu[0][0]']
conv3_block1_0_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block1_0_conv[0][0]']
ization)
conv3_block1_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block1_3_conv[0][0]']
ization)
conv3_block1_add (Add) (None, 28, 28, 512) 0 ['conv3_block1_0_bn[0][0]',
'conv3_block1_3_bn[0][0]']
conv3_block1_out (Activation) (None, 28, 28, 512) 0 ['conv3_block1_add[0][0]']
conv3_block2_1_conv (Conv2D) (None, 28, 28, 128) 65664 ['conv3_block1_out[0][0]']
conv3_block2_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block2_1_conv[0][0]']
ization)
conv3_block2_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block2_1_bn[0][0]']
n)
conv3_block2_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block2_1_relu[0][0]']
conv3_block2_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block2_2_conv[0][0]']
ization)
conv3_block2_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block2_2_bn[0][0]']
n)
conv3_block2_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block2_2_relu[0][0]']
conv3_block2_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block2_3_conv[0][0]']
ization)
conv3_block2_add (Add) (None, 28, 28, 512) 0 ['conv3_block1_out[0][0]',
'conv3_block2_3_bn[0][0]']
conv3_block2_out (Activation) (None, 28, 28, 512) 0 ['conv3_block2_add[0][0]']
conv3_block3_1_conv (Conv2D) (None, 28, 28, 128) 65664 ['conv3_block2_out[0][0]']
conv3_block3_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block3_1_conv[0][0]']
ization)
conv3_block3_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block3_1_bn[0][0]']
n)
conv3_block3_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block3_1_relu[0][0]']
conv3_block3_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block3_2_conv[0][0]']
ization)
conv3_block3_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block3_2_bn[0][0]']
n)
conv3_block3_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block3_2_relu[0][0]']
conv3_block3_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block3_3_conv[0][0]']
ization)
conv3_block3_add (Add) (None, 28, 28, 512) 0 ['conv3_block2_out[0][0]',
'conv3_block3_3_bn[0][0]']
conv3_block3_out (Activation) (None, 28, 28, 512) 0 ['conv3_block3_add[0][0]']
conv3_block4_1_conv (Conv2D) (None, 28, 28, 128) 65664 ['conv3_block3_out[0][0]']
conv3_block4_1_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block4_1_conv[0][0]']
ization)
conv3_block4_1_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block4_1_bn[0][0]']
n)
conv3_block4_2_conv (Conv2D) (None, 28, 28, 128) 147584 ['conv3_block4_1_relu[0][0]']
conv3_block4_2_bn (BatchNormal (None, 28, 28, 128) 512 ['conv3_block4_2_conv[0][0]']
ization)
conv3_block4_2_relu (Activatio (None, 28, 28, 128) 0 ['conv3_block4_2_bn[0][0]']
n)
conv3_block4_3_conv (Conv2D) (None, 28, 28, 512) 66048 ['conv3_block4_2_relu[0][0]']
conv3_block4_3_bn (BatchNormal (None, 28, 28, 512) 2048 ['conv3_block4_3_conv[0][0]']
ization)
conv3_block4_add (Add) (None, 28, 28, 512) 0 ['conv3_block3_out[0][0]',
'conv3_block4_3_bn[0][0]']
conv3_block4_out (Activation) (None, 28, 28, 512) 0 ['conv3_block4_add[0][0]']
conv4_block1_1_conv (Conv2D) (None, 14, 14, 256) 131328 ['conv3_block4_out[0][0]']
conv4_block1_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block1_1_conv[0][0]']
ization)
conv4_block1_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block1_1_bn[0][0]']
n)
conv4_block1_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block1_1_relu[0][0]']
conv4_block1_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block1_2_conv[0][0]']
ization)
conv4_block1_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block1_2_bn[0][0]']
n)
conv4_block1_0_conv (Conv2D) (None, 14, 14, 1024 525312 ['conv3_block4_out[0][0]']
)
conv4_block1_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block1_2_relu[0][0]']
)
conv4_block1_0_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block1_0_conv[0][0]']
ization) )
conv4_block1_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block1_3_conv[0][0]']
ization) )
conv4_block1_add (Add) (None, 14, 14, 1024 0 ['conv4_block1_0_bn[0][0]',
) 'conv4_block1_3_bn[0][0]']
conv4_block1_out (Activation) (None, 14, 14, 1024 0 ['conv4_block1_add[0][0]']
)
conv4_block2_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block1_out[0][0]']
conv4_block2_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block2_1_conv[0][0]']
ization)
conv4_block2_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block2_1_bn[0][0]']
n)
conv4_block2_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block2_1_relu[0][0]']
conv4_block2_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block2_2_conv[0][0]']
ization)
conv4_block2_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block2_2_bn[0][0]']
n)
conv4_block2_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block2_2_relu[0][0]']
)
conv4_block2_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block2_3_conv[0][0]']
ization) )
conv4_block2_add (Add) (None, 14, 14, 1024 0 ['conv4_block1_out[0][0]',
) 'conv4_block2_3_bn[0][0]']
conv4_block2_out (Activation) (None, 14, 14, 1024 0 ['conv4_block2_add[0][0]']
)
conv4_block3_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block2_out[0][0]']
conv4_block3_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block3_1_conv[0][0]']
ization)
conv4_block3_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block3_1_bn[0][0]']
n)
conv4_block3_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block3_1_relu[0][0]']
conv4_block3_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block3_2_conv[0][0]']
ization)
conv4_block3_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block3_2_bn[0][0]']
n)
conv4_block3_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block3_2_relu[0][0]']
)
conv4_block3_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block3_3_conv[0][0]']
ization) )
conv4_block3_add (Add) (None, 14, 14, 1024 0 ['conv4_block2_out[0][0]',
) 'conv4_block3_3_bn[0][0]']
conv4_block3_out (Activation) (None, 14, 14, 1024 0 ['conv4_block3_add[0][0]']
)
conv4_block4_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block3_out[0][0]']
conv4_block4_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block4_1_conv[0][0]']
ization)
conv4_block4_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block4_1_bn[0][0]']
n)
conv4_block4_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block4_1_relu[0][0]']
conv4_block4_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block4_2_conv[0][0]']
ization)
conv4_block4_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block4_2_bn[0][0]']
n)
conv4_block4_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block4_2_relu[0][0]']
)
conv4_block4_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block4_3_conv[0][0]']
ization) )
conv4_block4_add (Add) (None, 14, 14, 1024 0 ['conv4_block3_out[0][0]',
) 'conv4_block4_3_bn[0][0]']
conv4_block4_out (Activation) (None, 14, 14, 1024 0 ['conv4_block4_add[0][0]']
)
conv4_block5_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block4_out[0][0]']
conv4_block5_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block5_1_conv[0][0]']
ization)
conv4_block5_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block5_1_bn[0][0]']
n)
conv4_block5_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block5_1_relu[0][0]']
conv4_block5_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block5_2_conv[0][0]']
ization)
conv4_block5_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block5_2_bn[0][0]']
n)
conv4_block5_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block5_2_relu[0][0]']
)
conv4_block5_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block5_3_conv[0][0]']
ization) )
conv4_block5_add (Add) (None, 14, 14, 1024 0 ['conv4_block4_out[0][0]',
) 'conv4_block5_3_bn[0][0]']
conv4_block5_out (Activation) (None, 14, 14, 1024 0 ['conv4_block5_add[0][0]']
)
conv4_block6_1_conv (Conv2D) (None, 14, 14, 256) 262400 ['conv4_block5_out[0][0]']
conv4_block6_1_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block6_1_conv[0][0]']
ization)
conv4_block6_1_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block6_1_bn[0][0]']
n)
conv4_block6_2_conv (Conv2D) (None, 14, 14, 256) 590080 ['conv4_block6_1_relu[0][0]']
conv4_block6_2_bn (BatchNormal (None, 14, 14, 256) 1024 ['conv4_block6_2_conv[0][0]']
ization)
conv4_block6_2_relu (Activatio (None, 14, 14, 256) 0 ['conv4_block6_2_bn[0][0]']
n)
conv4_block6_3_conv (Conv2D) (None, 14, 14, 1024 263168 ['conv4_block6_2_relu[0][0]']
)
conv4_block6_3_bn (BatchNormal (None, 14, 14, 1024 4096 ['conv4_block6_3_conv[0][0]']
ization) )
conv4_block6_add (Add) (None, 14, 14, 1024 0 ['conv4_block5_out[0][0]',
) 'conv4_block6_3_bn[0][0]']
conv4_block6_out (Activation) (None, 14, 14, 1024 0 ['conv4_block6_add[0][0]']
)
conv5_block1_1_conv (Conv2D) (None, 7, 7, 512) 524800 ['conv4_block6_out[0][0]']
conv5_block1_1_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block1_1_conv[0][0]']
ization)
conv5_block1_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block1_1_bn[0][0]']
n)
conv5_block1_2_conv (Conv2D) (None, 7, 7, 512) 2359808 ['conv5_block1_1_relu[0][0]']
conv5_block1_2_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block1_2_conv[0][0]']
ization)
conv5_block1_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block1_2_bn[0][0]']
n)
conv5_block1_0_conv (Conv2D) (None, 7, 7, 2048) 2099200 ['conv4_block6_out[0][0]']
conv5_block1_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block1_2_relu[0][0]']
conv5_block1_0_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block1_0_conv[0][0]']
ization)
conv5_block1_3_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block1_3_conv[0][0]']
ization)
conv5_block1_add (Add) (None, 7, 7, 2048) 0 ['conv5_block1_0_bn[0][0]',
'conv5_block1_3_bn[0][0]']
conv5_block1_out (Activation) (None, 7, 7, 2048) 0 ['conv5_block1_add[0][0]']
conv5_block2_1_conv (Conv2D) (None, 7, 7, 512) 1049088 ['conv5_block1_out[0][0]']
conv5_block2_1_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block2_1_conv[0][0]']
ization)
conv5_block2_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block2_1_bn[0][0]']
n)
conv5_block2_2_conv (Conv2D) (None, 7, 7, 512) 2359808 ['conv5_block2_1_relu[0][0]']
conv5_block2_2_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block2_2_conv[0][0]']
ization)
conv5_block2_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block2_2_bn[0][0]']
n)
conv5_block2_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block2_2_relu[0][0]']
conv5_block2_3_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block2_3_conv[0][0]']
ization)
conv5_block2_add (Add) (None, 7, 7, 2048) 0 ['conv5_block1_out[0][0]',
'conv5_block2_3_bn[0][0]']
conv5_block2_out (Activation) (None, 7, 7, 2048) 0 ['conv5_block2_add[0][0]']
conv5_block3_1_conv (Conv2D) (None, 7, 7, 512) 1049088 ['conv5_block2_out[0][0]']
conv5_block3_1_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block3_1_conv[0][0]']
ization)
conv5_block3_1_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block3_1_bn[0][0]']
n)
conv5_block3_2_conv (Conv2D) (None, 7, 7, 512) 2359808 ['conv5_block3_1_relu[0][0]']
conv5_block3_2_bn (BatchNormal (None, 7, 7, 512) 2048 ['conv5_block3_2_conv[0][0]']
ization)
conv5_block3_2_relu (Activatio (None, 7, 7, 512) 0 ['conv5_block3_2_bn[0][0]']
n)
conv5_block3_3_conv (Conv2D) (None, 7, 7, 2048) 1050624 ['conv5_block3_2_relu[0][0]']
conv5_block3_3_bn (BatchNormal (None, 7, 7, 2048) 8192 ['conv5_block3_3_conv[0][0]']
ization)
conv5_block3_add (Add) (None, 7, 7, 2048) 0 ['conv5_block2_out[0][0]',
'conv5_block3_3_bn[0][0]']
conv5_block3_out (Activation) (None, 7, 7, 2048) 0 ['conv5_block3_add[0][0]']
avg_pool (GlobalAveragePooling (None, 2048) 0 ['conv5_block3_out[0][0]']
2D)
predictions (Dense) (None, 12) 24588 ['avg_pool[0][0]']
==================================================================================================
Total params: 23,612,300
Trainable params: 23,559,180
Non-trainable params: 53,120
__________________________________________________________________________________________________
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 3600 validated image filenames belonging to 12 classes.
225/225 [==============================] - 7s 27ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.5110 0.7704 0.6145 331
anarrhichomenum 0.3707 0.7053 0.4860 319
brevantherum 0.8571 0.0192 0.0376 312
dulcamara 0.2825 0.3359 0.3069 259
herposolanum 0.5217 0.1319 0.2105 273
holophylla 0.5292 0.4589 0.4915 316
lasiocarpa 0.8097 0.7994 0.8045 314
melongena 0.6201 0.3675 0.4615 302
micracantha 0.2291 0.3746 0.2843 315
petota 0.2978 0.5225 0.3794 289
solanum 0.4437 0.2620 0.3295 271
torva 0.2909 0.1605 0.2069 299
accuracy 0.4178 3600
macro avg 0.4803 0.4090 0.3844 3600
weighted avg 0.4846 0.4178 0.3902 3600
********************
* Confusion Matrix *
********************
**************************************
* Train/Val Accuracy and Loss graphs *
**************************************
*******************************
* Mean metrics across 4 folds *
*******************************
| 0 | |
|---|---|
| accuracy | 0.355208 |
| acanthophora.precision | 0.574573 |
| acanthophora.recall | 0.581872 |
| acanthophora.f1-score | 0.567652 |
| acanthophora.support | 298.250000 |
| anarrhichomenum.precision | 0.430213 |
| anarrhichomenum.recall | 0.703196 |
| anarrhichomenum.f1-score | 0.460831 |
| anarrhichomenum.support | 318.750000 |
| brevantherum.precision | 0.385957 |
| brevantherum.recall | 0.249190 |
| brevantherum.f1-score | 0.209797 |
| brevantherum.support | 288.000000 |
| dulcamara.precision | 0.384649 |
| dulcamara.recall | 0.195036 |
| dulcamara.f1-score | 0.224633 |
| dulcamara.support | 295.500000 |
| herposolanum.precision | 0.402083 |
| herposolanum.recall | 0.315441 |
| herposolanum.f1-score | 0.251336 |
| herposolanum.support | 285.000000 |
| holophylla.precision | 0.516875 |
| holophylla.recall | 0.331999 |
| holophylla.f1-score | 0.374831 |
| holophylla.support | 304.750000 |
| lasiocarpa.precision | 0.781612 |
| lasiocarpa.recall | 0.609413 |
| lasiocarpa.f1-score | 0.627092 |
| lasiocarpa.support | 312.500000 |
| melongena.precision | 0.567061 |
| melongena.recall | 0.313610 |
| melongena.f1-score | 0.361499 |
| melongena.support | 298.000000 |
| micracantha.precision | 0.250282 |
| micracantha.recall | 0.306187 |
| micracantha.f1-score | 0.253060 |
| micracantha.support | 300.000000 |
| petota.precision | 0.271986 |
| petota.recall | 0.379494 |
| petota.f1-score | 0.299666 |
| petota.support | 304.500000 |
| solanum.precision | 0.458515 |
| solanum.recall | 0.106630 |
| solanum.f1-score | 0.154527 |
| solanum.support | 294.500000 |
| torva.precision | 0.347437 |
| torva.recall | 0.127967 |
| torva.f1-score | 0.166701 |
| torva.support | 300.250000 |
| macro avg.precision | 0.447604 |
| macro avg.recall | 0.351670 |
| macro avg.f1-score | 0.329302 |
| macro avg.support | 3600.000000 |
| weighted avg.precision | 0.450277 |
| weighted avg.recall | 0.355208 |
| weighted avg.f1-score | 0.331415 |
| weighted avg.support | 3600.000000 |
CPU times: total: 1h 34min 59s
Wall time: 51min 42s
%%time
# Cross-validated transfer-learning run: an ImageNet-pretrained ResNet50
# backbone with a new softmax head, trained once per ShuffleSplit fold.
# Each fold checkpoints its best model to disk and appends its validation
# classification report so mean metrics can be computed across folds.
# NOTE(review): CV_FOLDS, VAL_SIZE, NUM_CLASSES, TRAINING_RUN_ID,
# TRAINING_DEVICE_NAME, balanced_training_data and the helper functions
# (create_model_checkpoint, train_model, evaluate_model,
# calculate_cv_mean_metrics) are defined in earlier notebook cells.
kf = ShuffleSplit(n_splits=CV_FOLDS,
                  test_size=VAL_SIZE,
                  random_state=RANDOM_SEED)
split = 1  # 1-based fold counter, used only in the checkpoint filename
resnet50_tl_cv_val_pred = []  # per-fold validation classification reports
for train_index, val_index in kf.split(balanced_training_data):
    # Fresh model per fold so no weights leak between folds.
    resnet50_tl_model = tf.keras.models.Sequential(name="resnet50_imagenet")
    # Pretrained backbone without its classification head.
    # NOTE(review): with include_top=False, Keras ignores the `classes`
    # argument -- the actual class count comes from the Dense head below.
    imagenet_resnet50_model= tf.keras.applications.ResNet50(
        include_top=False,
        pooling="max",
        input_shape=(224,224,3),
        classes=NUM_CLASSES,
        weights='imagenet',)
    # Freeze everything except the last 14 layers (partial fine-tuning).
    for layer in imagenet_resnet50_model.layers[:-14]:
        layer.trainable=False
    # Add a custom FC layer for actual learning
    resnet50_tl_model.add(imagenet_resnet50_model)
    resnet50_tl_model.add(tf.keras.layers.Flatten())
    #resnet50_tl_model.add(tf.keras.layers.Dense(4096, activation='relu'))
    #resnet50_tl_model.add(tf.keras.layers.Dense(4096, activation='relu'))
    resnet50_tl_model.add(tf.keras.layers.Dense(NUM_CLASSES, activation='softmax'))
    # Add an optimizer
    resnet50_tl_model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.003),
                              loss='categorical_crossentropy',
                              metrics=['accuracy'])
    #resnet50_tl_model.compile(optimizer=tf.optimizers.SGD(learning_rate=0.003, momentum=0.9),
    #                          loss='categorical_crossentropy',
    #                          metrics=['accuracy'])
    # Row-index the balanced DataFrame into this fold's train/val subsets.
    training_split_data = balanced_training_data.iloc[train_index]
    val_split_data = balanced_training_data.iloc[val_index]
    # Add a progress bar and save checkpoints
    resnet50_tl_callbacks = [
        # Checkpoint path embeds the run id and the fold number.
        create_model_checkpoint(os.path.join(DATA_ROOT_LOCATION, f"resnet50_tl_{TRAINING_RUN_ID}_{split}.h5")),
        tf.keras.callbacks.ProgbarLogger(
            count_mode = 'steps',
            stateful_metrics = None
        ),
        # Stop early (and roll back to the best weights) if val_accuracy
        # fails to improve for 40 consecutive epochs.
        tf.keras.callbacks.EarlyStopping(
            monitor='val_accuracy',
            min_delta=0,
            patience=40,
            verbose=1,
            mode='auto',
            restore_best_weights=True
        )
    ]
    # Keyword arguments forwarded to the project's train_model helper
    # (which presumably passes them on to model.fit -- TODO confirm).
    fit_params = {
        "x": training_split_data,
        "epochs": 105,
        "callbacks": resnet50_tl_callbacks,
        "validation_data": val_split_data,
        "steps_per_epoch": 64,
        "validation_steps": 10,
    }
    # ResNet50 expects its own input preprocessing (caffe-style channel
    # means), applied by the train/evaluate helpers.
    preproc_func = tf.keras.applications.resnet50.preprocess_input
    # Pin training to the configured device (e.g. a specific GPU).
    with tf.device(TRAINING_DEVICE_NAME):
        resnet50_tl_training_history = train_model(resnet50_tl_model,
                                                   fit_params=fit_params,
                                                   preproc_func=preproc_func)
        # NOTE(review): original indentation was lost in the export; the
        # evaluation is kept inside the device scope, which is equivalent
        # for device placement either way.
        _, pred_report = evaluate_model(resnet50_tl_model,
                                        resnet50_tl_training_history,
                                        fit_params=fit_params,
                                        preproc_func=preproc_func)
    resnet50_tl_cv_val_pred.append(pred_report)
    split += 1
# Aggregate per-fold reports into mean precision/recall/f1 per class.
calculate_cv_mean_metrics(resnet50_tl_cv_val_pred)
****************************
* Started at 4408.09375... *
****************************
Found 14400 validated image filenames belonging to 12 classes.
Found 3600 validated image filenames belonging to 12 classes.
Epoch 1/105
64/64 [==============================] - ETA: 0s - loss: 9.7812 - accuracy: 0.1016
Epoch 1: val_accuracy improved from -inf to 0.10625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_1.h5
64/64 [==============================] - 11s 136ms/step - loss: 9.7812 - accuracy: 0.1016 - val_loss: 10.5562 - val_accuracy: 0.1063
Epoch 2/105
64/64 [==============================] - ETA: 0s - loss: 4.6693 - accuracy: 0.1172
Epoch 2: val_accuracy improved from 0.10625 to 0.11250, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_1.h5
64/64 [==============================] - 8s 123ms/step - loss: 4.6693 - accuracy: 0.1172 - val_loss: 2.6248 - val_accuracy: 0.1125
Epoch 3/105
64/64 [==============================] - ETA: 0s - loss: 3.9980 - accuracy: 0.1260
Epoch 3: val_accuracy improved from 0.11250 to 0.13750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_1.h5
64/64 [==============================] - 8s 121ms/step - loss: 3.9980 - accuracy: 0.1260 - val_loss: 2.7891 - val_accuracy: 0.1375
Epoch 4/105
64/64 [==============================] - ETA: 0s - loss: 3.5856 - accuracy: 0.1348
Epoch 4: val_accuracy did not improve from 0.13750
64/64 [==============================] - 7s 112ms/step - loss: 3.5856 - accuracy: 0.1348 - val_loss: 2.5130 - val_accuracy: 0.1312
Epoch 5/105
64/64 [==============================] - ETA: 0s - loss: 3.5041 - accuracy: 0.1533
Epoch 5: val_accuracy improved from 0.13750 to 0.16250, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_1.h5
64/64 [==============================] - 8s 121ms/step - loss: 3.5041 - accuracy: 0.1533 - val_loss: 2.6289 - val_accuracy: 0.1625
Epoch 6/105
64/64 [==============================] - ETA: 0s - loss: 3.1963 - accuracy: 0.1689
Epoch 6: val_accuracy improved from 0.16250 to 0.20625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_1.h5
64/64 [==============================] - 8s 121ms/step - loss: 3.1963 - accuracy: 0.1689 - val_loss: 2.4895 - val_accuracy: 0.2062
Epoch 7/105
64/64 [==============================] - ETA: 0s - loss: 3.1937 - accuracy: 0.1621
Epoch 7: val_accuracy improved from 0.20625 to 0.23750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_1.h5
64/64 [==============================] - 8s 123ms/step - loss: 3.1937 - accuracy: 0.1621 - val_loss: 2.3971 - val_accuracy: 0.2375
Epoch 8/105
64/64 [==============================] - ETA: 0s - loss: 2.7437 - accuracy: 0.1904
Epoch 8: val_accuracy did not improve from 0.23750
64/64 [==============================] - 7s 111ms/step - loss: 2.7437 - accuracy: 0.1904 - val_loss: 2.4495 - val_accuracy: 0.2188
Epoch 9/105
64/64 [==============================] - ETA: 0s - loss: 2.7180 - accuracy: 0.1660
Epoch 9: val_accuracy did not improve from 0.23750
64/64 [==============================] - 7s 112ms/step - loss: 2.7180 - accuracy: 0.1660 - val_loss: 2.4630 - val_accuracy: 0.1750
Epoch 10/105
64/64 [==============================] - ETA: 0s - loss: 2.6787 - accuracy: 0.1807
Epoch 10: val_accuracy did not improve from 0.23750
64/64 [==============================] - 7s 114ms/step - loss: 2.6787 - accuracy: 0.1807 - val_loss: 2.3248 - val_accuracy: 0.2250
Epoch 11/105
64/64 [==============================] - ETA: 0s - loss: 2.6185 - accuracy: 0.1807
Epoch 11: val_accuracy did not improve from 0.23750
64/64 [==============================] - 7s 110ms/step - loss: 2.6185 - accuracy: 0.1807 - val_loss: 2.2160 - val_accuracy: 0.2313
Epoch 12/105
64/64 [==============================] - ETA: 0s - loss: 2.4152 - accuracy: 0.1797
Epoch 12: val_accuracy did not improve from 0.23750
64/64 [==============================] - 7s 112ms/step - loss: 2.4152 - accuracy: 0.1797 - val_loss: 2.6172 - val_accuracy: 0.1688
Epoch 13/105
64/64 [==============================] - ETA: 0s - loss: 2.3867 - accuracy: 0.2051
Epoch 13: val_accuracy did not improve from 0.23750
64/64 [==============================] - 7s 113ms/step - loss: 2.3867 - accuracy: 0.2051 - val_loss: 2.4021 - val_accuracy: 0.2188
Epoch 14/105
64/64 [==============================] - ETA: 0s - loss: 2.3620 - accuracy: 0.1924
Epoch 14: val_accuracy did not improve from 0.23750
64/64 [==============================] - 7s 110ms/step - loss: 2.3620 - accuracy: 0.1924 - val_loss: 2.5437 - val_accuracy: 0.1500
Epoch 15/105
64/64 [==============================] - ETA: 0s - loss: 2.3576 - accuracy: 0.2041
Epoch 15: val_accuracy did not improve from 0.23750
64/64 [==============================] - 7s 111ms/step - loss: 2.3576 - accuracy: 0.2041 - val_loss: 2.2172 - val_accuracy: 0.2313
Epoch 16/105
64/64 [==============================] - ETA: 0s - loss: 2.2703 - accuracy: 0.2275
Epoch 16: val_accuracy did not improve from 0.23750
64/64 [==============================] - 7s 112ms/step - loss: 2.2703 - accuracy: 0.2275 - val_loss: 2.3520 - val_accuracy: 0.1937
Epoch 17/105
64/64 [==============================] - ETA: 0s - loss: 2.3020 - accuracy: 0.2305
Epoch 17: val_accuracy did not improve from 0.23750
64/64 [==============================] - 7s 110ms/step - loss: 2.3020 - accuracy: 0.2305 - val_loss: 2.5890 - val_accuracy: 0.2000
Epoch 18/105
64/64 [==============================] - ETA: 0s - loss: 2.2257 - accuracy: 0.2080
Epoch 18: val_accuracy did not improve from 0.23750
64/64 [==============================] - 7s 111ms/step - loss: 2.2257 - accuracy: 0.2080 - val_loss: 2.2904 - val_accuracy: 0.2313
Epoch 19/105
64/64 [==============================] - ETA: 0s - loss: 2.2487 - accuracy: 0.2236
Epoch 19: val_accuracy did not improve from 0.23750
64/64 [==============================] - 7s 112ms/step - loss: 2.2487 - accuracy: 0.2236 - val_loss: 2.4351 - val_accuracy: 0.2313
Epoch 20/105
64/64 [==============================] - ETA: 0s - loss: 2.2961 - accuracy: 0.2227
Epoch 20: val_accuracy improved from 0.23750 to 0.25000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_1.h5
64/64 [==============================] - 8s 121ms/step - loss: 2.2961 - accuracy: 0.2227 - val_loss: 2.5697 - val_accuracy: 0.2500
Epoch 21/105
64/64 [==============================] - ETA: 0s - loss: 2.2324 - accuracy: 0.2432
Epoch 21: val_accuracy improved from 0.25000 to 0.28125, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_1.h5
64/64 [==============================] - 8s 122ms/step - loss: 2.2324 - accuracy: 0.2432 - val_loss: 2.2166 - val_accuracy: 0.2812
Epoch 22/105
64/64 [==============================] - ETA: 0s - loss: 2.2168 - accuracy: 0.2500
Epoch 22: val_accuracy did not improve from 0.28125
64/64 [==============================] - 7s 114ms/step - loss: 2.2168 - accuracy: 0.2500 - val_loss: 2.4867 - val_accuracy: 0.2062
Epoch 23/105
64/64 [==============================] - ETA: 0s - loss: 2.2358 - accuracy: 0.2354
Epoch 23: val_accuracy did not improve from 0.28125
64/64 [==============================] - 7s 111ms/step - loss: 2.2358 - accuracy: 0.2354 - val_loss: 2.6120 - val_accuracy: 0.1750
Epoch 24/105
64/64 [==============================] - ETA: 0s - loss: 2.1811 - accuracy: 0.2617
Epoch 24: val_accuracy did not improve from 0.28125
64/64 [==============================] - 7s 112ms/step - loss: 2.1811 - accuracy: 0.2617 - val_loss: 2.4839 - val_accuracy: 0.2000
Epoch 25/105
64/64 [==============================] - ETA: 0s - loss: 2.1387 - accuracy: 0.2520
Epoch 25: val_accuracy did not improve from 0.28125
64/64 [==============================] - 7s 114ms/step - loss: 2.1387 - accuracy: 0.2520 - val_loss: 2.8331 - val_accuracy: 0.1875
Epoch 26/105
64/64 [==============================] - ETA: 0s - loss: 2.1543 - accuracy: 0.2383
Epoch 26: val_accuracy did not improve from 0.28125
64/64 [==============================] - 7s 111ms/step - loss: 2.1543 - accuracy: 0.2383 - val_loss: 2.2987 - val_accuracy: 0.2750
Epoch 27/105
64/64 [==============================] - ETA: 0s - loss: 2.1374 - accuracy: 0.2637
Epoch 27: val_accuracy did not improve from 0.28125
64/64 [==============================] - 7s 111ms/step - loss: 2.1374 - accuracy: 0.2637 - val_loss: 2.5094 - val_accuracy: 0.2438
Epoch 28/105
64/64 [==============================] - ETA: 0s - loss: 2.1392 - accuracy: 0.2744
Epoch 28: val_accuracy did not improve from 0.28125
64/64 [==============================] - 7s 114ms/step - loss: 2.1392 - accuracy: 0.2744 - val_loss: 2.4446 - val_accuracy: 0.2562
Epoch 29/105
64/64 [==============================] - ETA: 0s - loss: 2.1015 - accuracy: 0.2744
Epoch 29: val_accuracy did not improve from 0.28125
64/64 [==============================] - 7s 111ms/step - loss: 2.1015 - accuracy: 0.2744 - val_loss: 2.2231 - val_accuracy: 0.2688
Epoch 30/105
64/64 [==============================] - ETA: 0s - loss: 2.1173 - accuracy: 0.2471
Epoch 30: val_accuracy did not improve from 0.28125
64/64 [==============================] - 7s 111ms/step - loss: 2.1173 - accuracy: 0.2471 - val_loss: 2.4541 - val_accuracy: 0.2625
Epoch 31/105
64/64 [==============================] - ETA: 0s - loss: 2.1738 - accuracy: 0.2451
Epoch 31: val_accuracy did not improve from 0.28125
64/64 [==============================] - 7s 114ms/step - loss: 2.1738 - accuracy: 0.2451 - val_loss: 2.2989 - val_accuracy: 0.2625
Epoch 32/105
64/64 [==============================] - ETA: 0s - loss: 2.0966 - accuracy: 0.2617
Epoch 32: val_accuracy did not improve from 0.28125
64/64 [==============================] - 7s 111ms/step - loss: 2.0966 - accuracy: 0.2617 - val_loss: 2.1202 - val_accuracy: 0.2812
Epoch 33/105
64/64 [==============================] - ETA: 0s - loss: 2.1353 - accuracy: 0.2607
Epoch 33: val_accuracy improved from 0.28125 to 0.30000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_1.h5
64/64 [==============================] - 8s 126ms/step - loss: 2.1353 - accuracy: 0.2607 - val_loss: 2.0906 - val_accuracy: 0.3000
Epoch 34/105
64/64 [==============================] - ETA: 0s - loss: 2.1408 - accuracy: 0.2461
Epoch 34: val_accuracy did not improve from 0.30000
64/64 [==============================] - 7s 113ms/step - loss: 2.1408 - accuracy: 0.2461 - val_loss: 2.3615 - val_accuracy: 0.2188
Epoch 35/105
64/64 [==============================] - ETA: 0s - loss: 2.1295 - accuracy: 0.2578
Epoch 35: val_accuracy did not improve from 0.30000
64/64 [==============================] - 7s 111ms/step - loss: 2.1295 - accuracy: 0.2578 - val_loss: 2.2119 - val_accuracy: 0.2438
Epoch 36/105
64/64 [==============================] - ETA: 0s - loss: 2.0974 - accuracy: 0.2549
Epoch 36: val_accuracy did not improve from 0.30000
64/64 [==============================] - 7s 110ms/step - loss: 2.0974 - accuracy: 0.2549 - val_loss: 2.2490 - val_accuracy: 0.2188
Epoch 37/105
64/64 [==============================] - ETA: 0s - loss: 2.1215 - accuracy: 0.2861
Epoch 37: val_accuracy did not improve from 0.30000
64/64 [==============================] - 7s 113ms/step - loss: 2.1215 - accuracy: 0.2861 - val_loss: 2.2203 - val_accuracy: 0.2625
Epoch 38/105
64/64 [==============================] - ETA: 0s - loss: 2.1530 - accuracy: 0.2539
Epoch 38: val_accuracy did not improve from 0.30000
64/64 [==============================] - 7s 110ms/step - loss: 2.1530 - accuracy: 0.2539 - val_loss: 2.3198 - val_accuracy: 0.2313
Epoch 39/105
64/64 [==============================] - ETA: 0s - loss: 2.0472 - accuracy: 0.2764
Epoch 39: val_accuracy did not improve from 0.30000
64/64 [==============================] - 7s 110ms/step - loss: 2.0472 - accuracy: 0.2764 - val_loss: 2.2323 - val_accuracy: 0.3000
Epoch 40/105
64/64 [==============================] - ETA: 0s - loss: 2.0886 - accuracy: 0.2666
Epoch 40: val_accuracy improved from 0.30000 to 0.31250, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_1.h5
64/64 [==============================] - 8s 128ms/step - loss: 2.0886 - accuracy: 0.2666 - val_loss: 2.0446 - val_accuracy: 0.3125
Epoch 41/105
64/64 [==============================] - ETA: 0s - loss: 2.0670 - accuracy: 0.2812
Epoch 41: val_accuracy improved from 0.31250 to 0.31875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_1.h5
64/64 [==============================] - 8s 119ms/step - loss: 2.0670 - accuracy: 0.2812 - val_loss: 2.0212 - val_accuracy: 0.3187
Epoch 42/105
64/64 [==============================] - ETA: 0s - loss: 2.0874 - accuracy: 0.2793
Epoch 42: val_accuracy did not improve from 0.31875
64/64 [==============================] - 7s 114ms/step - loss: 2.0874 - accuracy: 0.2793 - val_loss: 2.1085 - val_accuracy: 0.3063
Epoch 43/105
64/64 [==============================] - ETA: 0s - loss: 2.1161 - accuracy: 0.2559
Epoch 43: val_accuracy did not improve from 0.31875
64/64 [==============================] - 7s 115ms/step - loss: 2.1161 - accuracy: 0.2559 - val_loss: 2.1229 - val_accuracy: 0.2937
Epoch 44/105
64/64 [==============================] - ETA: 0s - loss: 2.1339 - accuracy: 0.2520
Epoch 44: val_accuracy improved from 0.31875 to 0.33125, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_1.h5
64/64 [==============================] - 8s 123ms/step - loss: 2.1339 - accuracy: 0.2520 - val_loss: 1.9852 - val_accuracy: 0.3313
Epoch 45/105
64/64 [==============================] - ETA: 0s - loss: 2.1035 - accuracy: 0.2588
Epoch 45: val_accuracy did not improve from 0.33125
64/64 [==============================] - 7s 112ms/step - loss: 2.1035 - accuracy: 0.2588 - val_loss: 1.9931 - val_accuracy: 0.2812
Epoch 46/105
64/64 [==============================] - ETA: 0s - loss: 2.0172 - accuracy: 0.2900
Epoch 46: val_accuracy did not improve from 0.33125
64/64 [==============================] - 7s 112ms/step - loss: 2.0172 - accuracy: 0.2900 - val_loss: 2.0509 - val_accuracy: 0.3000
Epoch 47/105
64/64 [==============================] - ETA: 0s - loss: 2.0648 - accuracy: 0.2842
Epoch 47: val_accuracy did not improve from 0.33125
64/64 [==============================] - 7s 110ms/step - loss: 2.0648 - accuracy: 0.2842 - val_loss: 2.2216 - val_accuracy: 0.2750
Epoch 48/105
64/64 [==============================] - ETA: 0s - loss: 2.0550 - accuracy: 0.2871
Epoch 48: val_accuracy did not improve from 0.33125
64/64 [==============================] - 7s 111ms/step - loss: 2.0550 - accuracy: 0.2871 - val_loss: 2.3782 - val_accuracy: 0.2375
Epoch 49/105
64/64 [==============================] - ETA: 0s - loss: 2.0413 - accuracy: 0.2910
Epoch 49: val_accuracy improved from 0.33125 to 0.40625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_1.h5
64/64 [==============================] - 8s 122ms/step - loss: 2.0413 - accuracy: 0.2910 - val_loss: 1.8583 - val_accuracy: 0.4062
Epoch 50/105
64/64 [==============================] - ETA: 0s - loss: 2.0602 - accuracy: 0.2803
Epoch 50: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 110ms/step - loss: 2.0602 - accuracy: 0.2803 - val_loss: 2.1885 - val_accuracy: 0.2188
Epoch 51/105
64/64 [==============================] - ETA: 0s - loss: 2.0507 - accuracy: 0.2969
Epoch 51: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 112ms/step - loss: 2.0507 - accuracy: 0.2969 - val_loss: 1.9618 - val_accuracy: 0.3000
Epoch 52/105
64/64 [==============================] - ETA: 0s - loss: 2.0516 - accuracy: 0.2549
Epoch 52: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 113ms/step - loss: 2.0516 - accuracy: 0.2549 - val_loss: 2.3482 - val_accuracy: 0.2500
Epoch 53/105
64/64 [==============================] - ETA: 0s - loss: 2.0233 - accuracy: 0.3164
Epoch 53: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 111ms/step - loss: 2.0233 - accuracy: 0.3164 - val_loss: 1.9483 - val_accuracy: 0.3063
Epoch 54/105
64/64 [==============================] - ETA: 0s - loss: 2.0763 - accuracy: 0.2803
Epoch 54: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 112ms/step - loss: 2.0763 - accuracy: 0.2803 - val_loss: 1.9815 - val_accuracy: 0.3187
Epoch 55/105
64/64 [==============================] - ETA: 0s - loss: 2.0491 - accuracy: 0.2881
Epoch 55: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 112ms/step - loss: 2.0491 - accuracy: 0.2881 - val_loss: 1.9879 - val_accuracy: 0.3000
Epoch 56/105
64/64 [==============================] - ETA: 0s - loss: 2.0247 - accuracy: 0.2803
Epoch 56: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 110ms/step - loss: 2.0247 - accuracy: 0.2803 - val_loss: 2.0569 - val_accuracy: 0.2750
Epoch 57/105
64/64 [==============================] - ETA: 0s - loss: 1.9914 - accuracy: 0.3115
Epoch 57: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 111ms/step - loss: 1.9914 - accuracy: 0.3115 - val_loss: 2.1460 - val_accuracy: 0.3187
Epoch 58/105
64/64 [==============================] - ETA: 0s - loss: 2.0872 - accuracy: 0.2852
Epoch 58: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 113ms/step - loss: 2.0872 - accuracy: 0.2852 - val_loss: 2.1906 - val_accuracy: 0.2250
Epoch 59/105
64/64 [==============================] - ETA: 0s - loss: 2.0007 - accuracy: 0.3066
Epoch 59: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 111ms/step - loss: 2.0007 - accuracy: 0.3066 - val_loss: 2.5043 - val_accuracy: 0.2438
Epoch 60/105
64/64 [==============================] - ETA: 0s - loss: 2.0124 - accuracy: 0.2734
Epoch 60: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 112ms/step - loss: 2.0124 - accuracy: 0.2734 - val_loss: 2.1974 - val_accuracy: 0.2625
Epoch 61/105
64/64 [==============================] - ETA: 0s - loss: 2.0005 - accuracy: 0.2861
Epoch 61: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 114ms/step - loss: 2.0005 - accuracy: 0.2861 - val_loss: 2.1705 - val_accuracy: 0.2812
Epoch 62/105
64/64 [==============================] - ETA: 0s - loss: 2.0755 - accuracy: 0.2598
Epoch 62: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 110ms/step - loss: 2.0755 - accuracy: 0.2598 - val_loss: 1.9843 - val_accuracy: 0.3063
Epoch 63/105
64/64 [==============================] - ETA: 0s - loss: 2.0320 - accuracy: 0.2930
Epoch 63: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 112ms/step - loss: 2.0320 - accuracy: 0.2930 - val_loss: 2.5575 - val_accuracy: 0.2000
Epoch 64/105
64/64 [==============================] - ETA: 0s - loss: 2.0686 - accuracy: 0.2939
Epoch 64: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 112ms/step - loss: 2.0686 - accuracy: 0.2939 - val_loss: 1.9575 - val_accuracy: 0.3187
Epoch 65/105
64/64 [==============================] - ETA: 0s - loss: 1.9595 - accuracy: 0.3105
Epoch 65: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 110ms/step - loss: 1.9595 - accuracy: 0.3105 - val_loss: 2.0668 - val_accuracy: 0.3063
Epoch 66/105
64/64 [==============================] - ETA: 0s - loss: 2.0012 - accuracy: 0.3008
Epoch 66: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 112ms/step - loss: 2.0012 - accuracy: 0.3008 - val_loss: 2.3435 - val_accuracy: 0.3063
Epoch 67/105
64/64 [==============================] - ETA: 0s - loss: 2.0257 - accuracy: 0.2988
Epoch 67: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 114ms/step - loss: 2.0257 - accuracy: 0.2988 - val_loss: 2.1740 - val_accuracy: 0.2625
Epoch 68/105
64/64 [==============================] - ETA: 0s - loss: 2.0065 - accuracy: 0.2969
Epoch 68: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 110ms/step - loss: 2.0065 - accuracy: 0.2969 - val_loss: 2.3022 - val_accuracy: 0.2438
Epoch 69/105
64/64 [==============================] - ETA: 0s - loss: 1.9959 - accuracy: 0.2998
Epoch 69: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 112ms/step - loss: 1.9959 - accuracy: 0.2998 - val_loss: 1.9650 - val_accuracy: 0.3187
Epoch 70/105
64/64 [==============================] - ETA: 0s - loss: 2.0112 - accuracy: 0.2930
Epoch 70: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 113ms/step - loss: 2.0112 - accuracy: 0.2930 - val_loss: 2.4239 - val_accuracy: 0.2375
Epoch 71/105
64/64 [==============================] - ETA: 0s - loss: 1.9939 - accuracy: 0.2861
Epoch 71: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 112ms/step - loss: 1.9939 - accuracy: 0.2861 - val_loss: 2.1238 - val_accuracy: 0.2500
Epoch 72/105
64/64 [==============================] - ETA: 0s - loss: 1.9831 - accuracy: 0.2910
Epoch 72: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 112ms/step - loss: 1.9831 - accuracy: 0.2910 - val_loss: 2.3316 - val_accuracy: 0.2313
Epoch 73/105
64/64 [==============================] - ETA: 0s - loss: 1.9850 - accuracy: 0.3164
Epoch 73: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 113ms/step - loss: 1.9850 - accuracy: 0.3164 - val_loss: 2.0942 - val_accuracy: 0.3000
Epoch 74/105
64/64 [==============================] - ETA: 0s - loss: 2.0228 - accuracy: 0.2803
Epoch 74: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 111ms/step - loss: 2.0228 - accuracy: 0.2803 - val_loss: 2.0668 - val_accuracy: 0.2500
Epoch 75/105
64/64 [==============================] - ETA: 0s - loss: 2.0015 - accuracy: 0.2969
Epoch 75: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 113ms/step - loss: 2.0015 - accuracy: 0.2969 - val_loss: 2.0452 - val_accuracy: 0.2875
Epoch 76/105
64/64 [==============================] - ETA: 0s - loss: 1.9747 - accuracy: 0.3125
Epoch 76: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 112ms/step - loss: 1.9747 - accuracy: 0.3125 - val_loss: 2.3290 - val_accuracy: 0.2562
Epoch 77/105
64/64 [==============================] - ETA: 0s - loss: 1.9871 - accuracy: 0.2969
Epoch 77: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 111ms/step - loss: 1.9871 - accuracy: 0.2969 - val_loss: 2.0156 - val_accuracy: 0.2937
Epoch 78/105
64/64 [==============================] - ETA: 0s - loss: 1.9793 - accuracy: 0.2939
Epoch 78: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 112ms/step - loss: 1.9793 - accuracy: 0.2939 - val_loss: 2.0635 - val_accuracy: 0.3625
Epoch 79/105
64/64 [==============================] - ETA: 0s - loss: 1.9785 - accuracy: 0.2969
Epoch 79: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 113ms/step - loss: 1.9785 - accuracy: 0.2969 - val_loss: 2.0949 - val_accuracy: 0.2625
Epoch 80/105
64/64 [==============================] - ETA: 0s - loss: 1.9570 - accuracy: 0.3193
Epoch 80: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 110ms/step - loss: 1.9570 - accuracy: 0.3193 - val_loss: 2.1119 - val_accuracy: 0.3438
Epoch 81/105
64/64 [==============================] - ETA: 0s - loss: 2.0233 - accuracy: 0.2852
Epoch 81: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 113ms/step - loss: 2.0233 - accuracy: 0.2852 - val_loss: 1.9587 - val_accuracy: 0.3125
Epoch 82/105
64/64 [==============================] - ETA: 0s - loss: 1.9822 - accuracy: 0.3125
Epoch 82: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 113ms/step - loss: 1.9822 - accuracy: 0.3125 - val_loss: 1.7857 - val_accuracy: 0.3938
Epoch 83/105
64/64 [==============================] - ETA: 0s - loss: 1.9347 - accuracy: 0.3154
Epoch 83: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 111ms/step - loss: 1.9347 - accuracy: 0.3154 - val_loss: 2.5211 - val_accuracy: 0.2812
Epoch 84/105
64/64 [==============================] - ETA: 0s - loss: 1.9297 - accuracy: 0.3262
Epoch 84: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 112ms/step - loss: 1.9297 - accuracy: 0.3262 - val_loss: 2.2044 - val_accuracy: 0.2562
Epoch 85/105
64/64 [==============================] - ETA: 0s - loss: 1.9434 - accuracy: 0.2949
Epoch 85: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 113ms/step - loss: 1.9434 - accuracy: 0.2949 - val_loss: 2.5246 - val_accuracy: 0.2250
Epoch 86/105
64/64 [==============================] - ETA: 0s - loss: 1.9994 - accuracy: 0.3008
Epoch 86: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 110ms/step - loss: 1.9994 - accuracy: 0.3008 - val_loss: 2.4070 - val_accuracy: 0.2375
Epoch 87/105
64/64 [==============================] - ETA: 0s - loss: 1.9817 - accuracy: 0.2920
Epoch 87: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 113ms/step - loss: 1.9817 - accuracy: 0.2920 - val_loss: 2.2478 - val_accuracy: 0.3000
Epoch 88/105
64/64 [==============================] - ETA: 0s - loss: 1.9603 - accuracy: 0.3164
Epoch 88: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 112ms/step - loss: 1.9603 - accuracy: 0.3164 - val_loss: 1.9432 - val_accuracy: 0.3562
Epoch 89/105
64/64 [==============================] - ETA: 0s - loss: 1.9784 - accuracy: 0.2861
Epoch 89: val_accuracy did not improve from 0.40625
64/64 [==============================] - 7s 111ms/step - loss: 1.9784 - accuracy: 0.2861 - val_loss: 2.0514 - val_accuracy: 0.2812
Restoring model weights from the end of the best epoch: 49.
Epoch 89: early stopping
********* Training time: 1075.171875 s.
*****************
* Model Summary *
*****************
Model: "resnet50_imagenet"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
resnet50 (Functional) (None, 2048) 23587712
flatten_4 (Flatten) (None, 2048) 0
dense_4 (Dense) (None, 12) 24588
=================================================================
Total params: 23,612,300
Trainable params: 4,494,348
Non-trainable params: 19,117,952
_________________________________________________________________
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 3600 validated image filenames belonging to 12 classes.
225/225 [==============================] - 7s 29ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.4665 0.5705 0.5133 305
anarrhichomenum 0.4877 0.4907 0.4892 322
brevantherum 0.0000 0.0000 0.0000 266
dulcamara 0.1779 0.2637 0.2124 311
herposolanum 0.1813 0.3405 0.2366 279
holophylla 0.3361 0.4291 0.3769 282
lasiocarpa 0.5681 0.6262 0.5957 313
melongena 0.4839 0.6688 0.5615 314
micracantha 0.2667 0.1739 0.2105 299
petota 0.3117 0.5133 0.3879 300
solanum 0.3765 0.1053 0.1645 304
torva 0.0000 0.0000 0.0000 305
accuracy 0.3539 3600
macro avg 0.3047 0.3485 0.3124 3600
weighted avg 0.3104 0.3539 0.3179 3600
********************
* Confusion Matrix *
********************
**************************************
* Train/Val Accuracy and Loss graphs *
**************************************
************************
* Started at 5497.0... *
************************
Found 14400 validated image filenames belonging to 12 classes.
Found 3600 validated image filenames belonging to 12 classes.
Epoch 1/105
64/64 [==============================] - ETA: 0s - loss: 9.8503 - accuracy: 0.0977
Epoch 1: val_accuracy improved from -inf to 0.08125, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_2.h5
64/64 [==============================] - 11s 134ms/step - loss: 9.8503 - accuracy: 0.0977 - val_loss: 22.7142 - val_accuracy: 0.0812
Epoch 2/105
64/64 [==============================] - ETA: 0s - loss: 4.5344 - accuracy: 0.1270
Epoch 2: val_accuracy improved from 0.08125 to 0.08750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_2.h5
64/64 [==============================] - 8s 119ms/step - loss: 4.5344 - accuracy: 0.1270 - val_loss: 3.2108 - val_accuracy: 0.0875
Epoch 3/105
64/64 [==============================] - ETA: 0s - loss: 4.3073 - accuracy: 0.1211
Epoch 3: val_accuracy did not improve from 0.08750
64/64 [==============================] - 7s 114ms/step - loss: 4.3073 - accuracy: 0.1211 - val_loss: 2.6725 - val_accuracy: 0.0875
Epoch 4/105
64/64 [==============================] - ETA: 0s - loss: 4.0181 - accuracy: 0.1338
Epoch 4: val_accuracy improved from 0.08750 to 0.16250, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_2.h5
64/64 [==============================] - 8s 122ms/step - loss: 4.0181 - accuracy: 0.1338 - val_loss: 2.7170 - val_accuracy: 0.1625
Epoch 5/105
64/64 [==============================] - ETA: 0s - loss: 3.4527 - accuracy: 0.1455
Epoch 5: val_accuracy improved from 0.16250 to 0.17500, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_2.h5
64/64 [==============================] - 8s 118ms/step - loss: 3.4527 - accuracy: 0.1455 - val_loss: 2.5119 - val_accuracy: 0.1750
Epoch 6/105
64/64 [==============================] - ETA: 0s - loss: 3.5159 - accuracy: 0.1445
Epoch 6: val_accuracy did not improve from 0.17500
64/64 [==============================] - 7s 114ms/step - loss: 3.5159 - accuracy: 0.1445 - val_loss: 2.6885 - val_accuracy: 0.1750
Epoch 7/105
64/64 [==============================] - ETA: 0s - loss: 3.2669 - accuracy: 0.1807
Epoch 7: val_accuracy did not improve from 0.17500
64/64 [==============================] - 7s 112ms/step - loss: 3.2669 - accuracy: 0.1807 - val_loss: 2.4887 - val_accuracy: 0.1562
Epoch 8/105
64/64 [==============================] - ETA: 0s - loss: 3.0566 - accuracy: 0.1436
Epoch 8: val_accuracy did not improve from 0.17500
64/64 [==============================] - 7s 111ms/step - loss: 3.0566 - accuracy: 0.1436 - val_loss: 2.9362 - val_accuracy: 0.1437
Epoch 9/105
64/64 [==============================] - ETA: 0s - loss: 2.8269 - accuracy: 0.1650
Epoch 9: val_accuracy did not improve from 0.17500
64/64 [==============================] - 7s 113ms/step - loss: 2.8269 - accuracy: 0.1650 - val_loss: 2.7073 - val_accuracy: 0.1437
Epoch 10/105
64/64 [==============================] - ETA: 0s - loss: 2.7722 - accuracy: 0.1689
Epoch 10: val_accuracy improved from 0.17500 to 0.24375, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_2.h5
64/64 [==============================] - 8s 122ms/step - loss: 2.7722 - accuracy: 0.1689 - val_loss: 2.2311 - val_accuracy: 0.2438
Epoch 11/105
64/64 [==============================] - ETA: 0s - loss: 2.7109 - accuracy: 0.1816
Epoch 11: val_accuracy did not improve from 0.24375
64/64 [==============================] - 7s 110ms/step - loss: 2.7109 - accuracy: 0.1816 - val_loss: 2.1393 - val_accuracy: 0.2250
Epoch 12/105
64/64 [==============================] - ETA: 0s - loss: 2.6327 - accuracy: 0.1836
Epoch 12: val_accuracy did not improve from 0.24375
64/64 [==============================] - 7s 113ms/step - loss: 2.6327 - accuracy: 0.1836 - val_loss: 2.4914 - val_accuracy: 0.2250
Epoch 13/105
64/64 [==============================] - ETA: 0s - loss: 2.4545 - accuracy: 0.1826
Epoch 13: val_accuracy did not improve from 0.24375
64/64 [==============================] - 7s 110ms/step - loss: 2.4545 - accuracy: 0.1826 - val_loss: 2.2283 - val_accuracy: 0.2188
Epoch 14/105
64/64 [==============================] - ETA: 0s - loss: 2.4535 - accuracy: 0.1953
Epoch 14: val_accuracy did not improve from 0.24375
64/64 [==============================] - 7s 110ms/step - loss: 2.4535 - accuracy: 0.1953 - val_loss: 2.5832 - val_accuracy: 0.1500
Epoch 15/105
64/64 [==============================] - ETA: 0s - loss: 2.2875 - accuracy: 0.2051
Epoch 15: val_accuracy did not improve from 0.24375
64/64 [==============================] - 7s 113ms/step - loss: 2.2875 - accuracy: 0.2051 - val_loss: 2.6106 - val_accuracy: 0.1562
Epoch 16/105
64/64 [==============================] - ETA: 0s - loss: 2.3792 - accuracy: 0.2129
Epoch 16: val_accuracy did not improve from 0.24375
64/64 [==============================] - 7s 111ms/step - loss: 2.3792 - accuracy: 0.2129 - val_loss: 2.1985 - val_accuracy: 0.2375
Epoch 17/105
64/64 [==============================] - ETA: 0s - loss: 2.3074 - accuracy: 0.2080
Epoch 17: val_accuracy did not improve from 0.24375
64/64 [==============================] - 7s 111ms/step - loss: 2.3074 - accuracy: 0.2080 - val_loss: 2.3871 - val_accuracy: 0.2188
Epoch 18/105
64/64 [==============================] - ETA: 0s - loss: 2.3485 - accuracy: 0.2002
Epoch 18: val_accuracy improved from 0.24375 to 0.30625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_2.h5
64/64 [==============================] - 8s 124ms/step - loss: 2.3485 - accuracy: 0.2002 - val_loss: 2.2747 - val_accuracy: 0.3063
Epoch 19/105
64/64 [==============================] - ETA: 0s - loss: 2.2555 - accuracy: 0.2275
Epoch 19: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 113ms/step - loss: 2.2555 - accuracy: 0.2275 - val_loss: 2.3419 - val_accuracy: 0.2500
Epoch 20/105
64/64 [==============================] - ETA: 0s - loss: 2.2360 - accuracy: 0.2344
Epoch 20: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 112ms/step - loss: 2.2360 - accuracy: 0.2344 - val_loss: 2.2133 - val_accuracy: 0.2562
Epoch 21/105
64/64 [==============================] - ETA: 0s - loss: 2.1993 - accuracy: 0.2422
Epoch 21: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 113ms/step - loss: 2.1993 - accuracy: 0.2422 - val_loss: 2.0711 - val_accuracy: 0.2750
Epoch 22/105
64/64 [==============================] - ETA: 0s - loss: 2.2176 - accuracy: 0.2490
Epoch 22: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 112ms/step - loss: 2.2176 - accuracy: 0.2490 - val_loss: 2.0663 - val_accuracy: 0.2812
Epoch 23/105
64/64 [==============================] - ETA: 0s - loss: 2.1976 - accuracy: 0.2471
Epoch 23: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 111ms/step - loss: 2.1976 - accuracy: 0.2471 - val_loss: 2.1059 - val_accuracy: 0.2500
Epoch 24/105
64/64 [==============================] - ETA: 0s - loss: 2.2096 - accuracy: 0.2412
Epoch 24: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 113ms/step - loss: 2.2096 - accuracy: 0.2412 - val_loss: 2.0671 - val_accuracy: 0.2688
Epoch 25/105
64/64 [==============================] - ETA: 0s - loss: 2.2061 - accuracy: 0.2285
Epoch 25: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 112ms/step - loss: 2.2061 - accuracy: 0.2285 - val_loss: 2.3344 - val_accuracy: 0.2500
Epoch 26/105
64/64 [==============================] - ETA: 0s - loss: 2.2319 - accuracy: 0.2334
Epoch 26: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 111ms/step - loss: 2.2319 - accuracy: 0.2334 - val_loss: 2.0505 - val_accuracy: 0.2875
Epoch 27/105
64/64 [==============================] - ETA: 0s - loss: 2.1662 - accuracy: 0.2354
Epoch 27: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 113ms/step - loss: 2.1662 - accuracy: 0.2354 - val_loss: 2.2526 - val_accuracy: 0.2438
Epoch 28/105
64/64 [==============================] - ETA: 0s - loss: 2.2488 - accuracy: 0.2393
Epoch 28: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 112ms/step - loss: 2.2488 - accuracy: 0.2393 - val_loss: 2.5025 - val_accuracy: 0.1688
Epoch 29/105
64/64 [==============================] - ETA: 0s - loss: 2.0877 - accuracy: 0.2861
Epoch 29: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 111ms/step - loss: 2.0877 - accuracy: 0.2861 - val_loss: 2.9600 - val_accuracy: 0.1562
Epoch 30/105
64/64 [==============================] - ETA: 0s - loss: 2.1481 - accuracy: 0.2480
Epoch 30: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 114ms/step - loss: 2.1481 - accuracy: 0.2480 - val_loss: 2.4713 - val_accuracy: 0.2250
Epoch 31/105
64/64 [==============================] - ETA: 0s - loss: 2.2033 - accuracy: 0.2393
Epoch 31: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 113ms/step - loss: 2.2033 - accuracy: 0.2393 - val_loss: 2.2750 - val_accuracy: 0.2313
Epoch 32/105
64/64 [==============================] - ETA: 0s - loss: 2.1308 - accuracy: 0.2490
Epoch 32: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 112ms/step - loss: 2.1308 - accuracy: 0.2490 - val_loss: 2.0739 - val_accuracy: 0.2875
Epoch 33/105
64/64 [==============================] - ETA: 0s - loss: 2.2186 - accuracy: 0.2061
Epoch 33: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 114ms/step - loss: 2.2186 - accuracy: 0.2061 - val_loss: 2.5815 - val_accuracy: 0.1750
Epoch 34/105
64/64 [==============================] - ETA: 0s - loss: 2.1283 - accuracy: 0.2412
Epoch 34: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 111ms/step - loss: 2.1283 - accuracy: 0.2412 - val_loss: 2.1554 - val_accuracy: 0.2937
Epoch 35/105
64/64 [==============================] - ETA: 0s - loss: 2.1071 - accuracy: 0.2705
Epoch 35: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 112ms/step - loss: 2.1071 - accuracy: 0.2705 - val_loss: 2.2109 - val_accuracy: 0.2688
Epoch 36/105
64/64 [==============================] - ETA: 0s - loss: 2.1297 - accuracy: 0.2832
Epoch 36: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 114ms/step - loss: 2.1297 - accuracy: 0.2832 - val_loss: 2.1049 - val_accuracy: 0.3063
Epoch 37/105
64/64 [==============================] - ETA: 0s - loss: 2.0634 - accuracy: 0.2803
Epoch 37: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 111ms/step - loss: 2.0634 - accuracy: 0.2803 - val_loss: 2.2719 - val_accuracy: 0.2625
Epoch 38/105
64/64 [==============================] - ETA: 0s - loss: 2.1403 - accuracy: 0.2686
Epoch 38: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 112ms/step - loss: 2.1403 - accuracy: 0.2686 - val_loss: 2.1409 - val_accuracy: 0.2812
Epoch 39/105
64/64 [==============================] - ETA: 0s - loss: 2.0373 - accuracy: 0.2998
Epoch 39: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 114ms/step - loss: 2.0373 - accuracy: 0.2998 - val_loss: 2.0417 - val_accuracy: 0.2688
Epoch 40/105
64/64 [==============================] - ETA: 0s - loss: 2.1194 - accuracy: 0.2646
Epoch 40: val_accuracy did not improve from 0.30625
64/64 [==============================] - 7s 110ms/step - loss: 2.1194 - accuracy: 0.2646 - val_loss: 2.1978 - val_accuracy: 0.2500
Epoch 41/105
64/64 [==============================] - ETA: 0s - loss: 2.0425 - accuracy: 0.2715
Epoch 41: val_accuracy improved from 0.30625 to 0.31875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_2.h5
64/64 [==============================] - 8s 121ms/step - loss: 2.0425 - accuracy: 0.2715 - val_loss: 1.9127 - val_accuracy: 0.3187
Epoch 42/105
64/64 [==============================] - ETA: 0s - loss: 2.1201 - accuracy: 0.2617
Epoch 42: val_accuracy improved from 0.31875 to 0.33125, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_2.h5
64/64 [==============================] - 8s 125ms/step - loss: 2.1201 - accuracy: 0.2617 - val_loss: 1.9074 - val_accuracy: 0.3313
Epoch 43/105
64/64 [==============================] - ETA: 0s - loss: 2.0667 - accuracy: 0.2793
Epoch 43: val_accuracy did not improve from 0.33125
64/64 [==============================] - 7s 111ms/step - loss: 2.0667 - accuracy: 0.2793 - val_loss: 2.5365 - val_accuracy: 0.1813
Epoch 44/105
64/64 [==============================] - ETA: 0s - loss: 2.0616 - accuracy: 0.2910
Epoch 44: val_accuracy did not improve from 0.33125
64/64 [==============================] - 7s 112ms/step - loss: 2.0616 - accuracy: 0.2910 - val_loss: 2.0678 - val_accuracy: 0.2875
Epoch 45/105
64/64 [==============================] - ETA: 0s - loss: 2.0792 - accuracy: 0.2715
Epoch 45: val_accuracy did not improve from 0.33125
64/64 [==============================] - 7s 113ms/step - loss: 2.0792 - accuracy: 0.2715 - val_loss: 2.0142 - val_accuracy: 0.2750
Epoch 46/105
64/64 [==============================] - ETA: 0s - loss: 2.0404 - accuracy: 0.2695
Epoch 46: val_accuracy did not improve from 0.33125
64/64 [==============================] - 7s 110ms/step - loss: 2.0404 - accuracy: 0.2695 - val_loss: 2.0404 - val_accuracy: 0.3000
Epoch 47/105
64/64 [==============================] - ETA: 0s - loss: 2.1117 - accuracy: 0.2715
Epoch 47: val_accuracy improved from 0.33125 to 0.34375, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_2.h5
64/64 [==============================] - 8s 122ms/step - loss: 2.1117 - accuracy: 0.2715 - val_loss: 1.9697 - val_accuracy: 0.3438
Epoch 48/105
64/64 [==============================] - ETA: 0s - loss: 2.0592 - accuracy: 0.2822
Epoch 48: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 115ms/step - loss: 2.0592 - accuracy: 0.2822 - val_loss: 2.0414 - val_accuracy: 0.2688
Epoch 49/105
64/64 [==============================] - ETA: 0s - loss: 2.1009 - accuracy: 0.2568
Epoch 49: val_accuracy did not improve from 0.34375
64/64 [==============================] - 7s 112ms/step - loss: 2.1009 - accuracy: 0.2568 - val_loss: 2.3856 - val_accuracy: 0.2188
Epoch 50/105
64/64 [==============================] - ETA: 0s - loss: 2.1028 - accuracy: 0.2529
Epoch 50: val_accuracy improved from 0.34375 to 0.35000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_2.h5
64/64 [==============================] - 8s 120ms/step - loss: 2.1028 - accuracy: 0.2529 - val_loss: 2.0256 - val_accuracy: 0.3500
Epoch 51/105
64/64 [==============================] - ETA: 0s - loss: 2.0454 - accuracy: 0.3008
Epoch 51: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 113ms/step - loss: 2.0454 - accuracy: 0.3008 - val_loss: 1.9409 - val_accuracy: 0.2562
Epoch 52/105
64/64 [==============================] - ETA: 0s - loss: 2.0632 - accuracy: 0.2705
Epoch 52: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 110ms/step - loss: 2.0632 - accuracy: 0.2705 - val_loss: 1.9821 - val_accuracy: 0.3125
Epoch 53/105
64/64 [==============================] - ETA: 0s - loss: 2.0574 - accuracy: 0.2666
Epoch 53: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 111ms/step - loss: 2.0574 - accuracy: 0.2666 - val_loss: 1.9018 - val_accuracy: 0.3438
Epoch 54/105
64/64 [==============================] - ETA: 0s - loss: 2.0989 - accuracy: 0.2646
Epoch 54: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 113ms/step - loss: 2.0989 - accuracy: 0.2646 - val_loss: 2.3757 - val_accuracy: 0.2250
Epoch 55/105
64/64 [==============================] - ETA: 0s - loss: 2.0596 - accuracy: 0.2783
Epoch 55: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 111ms/step - loss: 2.0596 - accuracy: 0.2783 - val_loss: 2.2695 - val_accuracy: 0.2625
Epoch 56/105
64/64 [==============================] - ETA: 0s - loss: 2.0369 - accuracy: 0.2881
Epoch 56: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 112ms/step - loss: 2.0369 - accuracy: 0.2881 - val_loss: 2.0933 - val_accuracy: 0.2812
Epoch 57/105
64/64 [==============================] - ETA: 0s - loss: 1.9764 - accuracy: 0.2891
Epoch 57: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 113ms/step - loss: 1.9764 - accuracy: 0.2891 - val_loss: 2.0430 - val_accuracy: 0.3063
Epoch 58/105
64/64 [==============================] - ETA: 0s - loss: 2.0222 - accuracy: 0.3018
Epoch 58: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 110ms/step - loss: 2.0222 - accuracy: 0.3018 - val_loss: 1.9825 - val_accuracy: 0.3187
Epoch 59/105
64/64 [==============================] - ETA: 0s - loss: 2.0559 - accuracy: 0.2900
Epoch 59: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 111ms/step - loss: 2.0559 - accuracy: 0.2900 - val_loss: 2.1429 - val_accuracy: 0.2688
Epoch 60/105
64/64 [==============================] - ETA: 0s - loss: 1.9912 - accuracy: 0.2939
Epoch 60: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 114ms/step - loss: 1.9912 - accuracy: 0.2939 - val_loss: 2.0032 - val_accuracy: 0.3187
Epoch 61/105
64/64 [==============================] - ETA: 0s - loss: 1.9638 - accuracy: 0.2959
Epoch 61: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 110ms/step - loss: 1.9638 - accuracy: 0.2959 - val_loss: 2.3918 - val_accuracy: 0.2313
Epoch 62/105
64/64 [==============================] - ETA: 0s - loss: 2.0423 - accuracy: 0.3057
Epoch 62: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 112ms/step - loss: 2.0423 - accuracy: 0.3057 - val_loss: 1.9531 - val_accuracy: 0.3187
Epoch 63/105
64/64 [==============================] - ETA: 0s - loss: 2.0115 - accuracy: 0.2910
Epoch 63: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 113ms/step - loss: 2.0115 - accuracy: 0.2910 - val_loss: 2.0715 - val_accuracy: 0.3313
Epoch 64/105
64/64 [==============================] - ETA: 0s - loss: 2.0376 - accuracy: 0.2959
Epoch 64: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 111ms/step - loss: 2.0376 - accuracy: 0.2959 - val_loss: 1.8788 - val_accuracy: 0.3438
Epoch 65/105
64/64 [==============================] - ETA: 0s - loss: 1.9894 - accuracy: 0.2910
Epoch 65: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 110ms/step - loss: 1.9894 - accuracy: 0.2910 - val_loss: 2.3077 - val_accuracy: 0.2313
Epoch 66/105
64/64 [==============================] - ETA: 0s - loss: 2.0639 - accuracy: 0.2715
Epoch 66: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 113ms/step - loss: 2.0639 - accuracy: 0.2715 - val_loss: 2.3708 - val_accuracy: 0.2313
Epoch 67/105
64/64 [==============================] - ETA: 0s - loss: 2.0666 - accuracy: 0.2764
Epoch 67: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 111ms/step - loss: 2.0666 - accuracy: 0.2764 - val_loss: 2.1583 - val_accuracy: 0.2937
Epoch 68/105
64/64 [==============================] - ETA: 0s - loss: 2.0124 - accuracy: 0.3076
Epoch 68: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 111ms/step - loss: 2.0124 - accuracy: 0.3076 - val_loss: 2.3681 - val_accuracy: 0.2750
Epoch 69/105
64/64 [==============================] - ETA: 0s - loss: 1.9758 - accuracy: 0.3057
Epoch 69: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 113ms/step - loss: 1.9758 - accuracy: 0.3057 - val_loss: 2.0078 - val_accuracy: 0.2937
Epoch 70/105
64/64 [==============================] - ETA: 0s - loss: 1.9811 - accuracy: 0.3047
Epoch 70: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 111ms/step - loss: 1.9811 - accuracy: 0.3047 - val_loss: 2.1495 - val_accuracy: 0.2438
Epoch 71/105
64/64 [==============================] - ETA: 0s - loss: 2.0719 - accuracy: 0.2881
Epoch 71: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 110ms/step - loss: 2.0719 - accuracy: 0.2881 - val_loss: 2.2064 - val_accuracy: 0.3063
Epoch 72/105
64/64 [==============================] - ETA: 0s - loss: 2.0287 - accuracy: 0.2861
Epoch 72: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 112ms/step - loss: 2.0287 - accuracy: 0.2861 - val_loss: 1.9634 - val_accuracy: 0.3000
Epoch 73/105
64/64 [==============================] - ETA: 0s - loss: 2.0277 - accuracy: 0.2959
Epoch 73: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 111ms/step - loss: 2.0277 - accuracy: 0.2959 - val_loss: 2.0232 - val_accuracy: 0.3187
Epoch 74/105
64/64 [==============================] - ETA: 0s - loss: 1.9644 - accuracy: 0.2783
Epoch 74: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 111ms/step - loss: 1.9644 - accuracy: 0.2783 - val_loss: 2.0943 - val_accuracy: 0.3125
Epoch 75/105
64/64 [==============================] - ETA: 0s - loss: 2.0595 - accuracy: 0.2773
Epoch 75: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 113ms/step - loss: 2.0595 - accuracy: 0.2773 - val_loss: 2.1096 - val_accuracy: 0.3125
Epoch 76/105
64/64 [==============================] - ETA: 0s - loss: 1.9773 - accuracy: 0.3096
Epoch 76: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 110ms/step - loss: 1.9773 - accuracy: 0.3096 - val_loss: 1.9098 - val_accuracy: 0.2812
Epoch 77/105
64/64 [==============================] - ETA: 0s - loss: 2.0371 - accuracy: 0.2930
Epoch 77: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 112ms/step - loss: 2.0371 - accuracy: 0.2930 - val_loss: 1.9801 - val_accuracy: 0.3125
Epoch 78/105
64/64 [==============================] - ETA: 0s - loss: 1.9874 - accuracy: 0.3242
Epoch 78: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 113ms/step - loss: 1.9874 - accuracy: 0.3242 - val_loss: 2.0968 - val_accuracy: 0.2812
Epoch 79/105
64/64 [==============================] - ETA: 0s - loss: 1.9779 - accuracy: 0.2900
Epoch 79: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 110ms/step - loss: 1.9779 - accuracy: 0.2900 - val_loss: 1.9657 - val_accuracy: 0.3313
Epoch 80/105
64/64 [==============================] - ETA: 0s - loss: 1.9844 - accuracy: 0.3027
Epoch 80: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 112ms/step - loss: 1.9844 - accuracy: 0.3027 - val_loss: 1.9642 - val_accuracy: 0.3000
Epoch 81/105
64/64 [==============================] - ETA: 0s - loss: 2.0159 - accuracy: 0.2930
Epoch 81: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 113ms/step - loss: 2.0159 - accuracy: 0.2930 - val_loss: 2.0018 - val_accuracy: 0.3313
Epoch 82/105
64/64 [==============================] - ETA: 0s - loss: 1.9852 - accuracy: 0.2969
Epoch 82: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 110ms/step - loss: 1.9852 - accuracy: 0.2969 - val_loss: 1.8741 - val_accuracy: 0.3375
Epoch 83/105
64/64 [==============================] - ETA: 0s - loss: 2.0389 - accuracy: 0.2842
Epoch 83: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 112ms/step - loss: 2.0389 - accuracy: 0.2842 - val_loss: 2.0593 - val_accuracy: 0.3063
Epoch 84/105
64/64 [==============================] - ETA: 0s - loss: 1.9768 - accuracy: 0.3086
Epoch 84: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 113ms/step - loss: 1.9768 - accuracy: 0.3086 - val_loss: 2.2043 - val_accuracy: 0.2688
Epoch 85/105
64/64 [==============================] - ETA: 0s - loss: 1.9731 - accuracy: 0.2998
Epoch 85: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 111ms/step - loss: 1.9731 - accuracy: 0.2998 - val_loss: 1.8762 - val_accuracy: 0.3375
Epoch 86/105
64/64 [==============================] - ETA: 0s - loss: 1.9439 - accuracy: 0.3154
Epoch 86: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 113ms/step - loss: 1.9439 - accuracy: 0.3154 - val_loss: 2.4517 - val_accuracy: 0.2125
Epoch 87/105
64/64 [==============================] - ETA: 0s - loss: 1.9912 - accuracy: 0.2939
Epoch 87: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 113ms/step - loss: 1.9912 - accuracy: 0.2939 - val_loss: 2.0583 - val_accuracy: 0.2750
Epoch 88/105
64/64 [==============================] - ETA: 0s - loss: 1.9634 - accuracy: 0.3076
Epoch 88: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 110ms/step - loss: 1.9634 - accuracy: 0.3076 - val_loss: 2.1731 - val_accuracy: 0.2688
Epoch 89/105
64/64 [==============================] - ETA: 0s - loss: 1.9754 - accuracy: 0.2920
Epoch 89: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 111ms/step - loss: 1.9754 - accuracy: 0.2920 - val_loss: 1.9530 - val_accuracy: 0.3187
Epoch 90/105
64/64 [==============================] - ETA: 0s - loss: 1.9596 - accuracy: 0.3047
Epoch 90: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 113ms/step - loss: 1.9596 - accuracy: 0.3047 - val_loss: 2.6071 - val_accuracy: 0.2125
Restoring model weights from the end of the best epoch: 50.
Epoch 90: early stopping
********* Training time: 1087.09375 s.
*****************
* Model Summary *
*****************
Model: "resnet50_imagenet"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
resnet50 (Functional) (None, 2048) 23587712
flatten_5 (Flatten) (None, 2048) 0
dense_5 (Dense) (None, 12) 24588
=================================================================
Total params: 23,612,300
Trainable params: 4,494,348
Non-trainable params: 19,117,952
_________________________________________________________________
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 3600 validated image filenames belonging to 12 classes.
225/225 [==============================] - 7s 28ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.4069 0.4260 0.4162 277
anarrhichomenum 0.4498 0.6540 0.5330 315
brevantherum 0.2377 0.4723 0.3162 307
dulcamara 0.3500 0.0236 0.0442 297
herposolanum 0.6667 0.0139 0.0272 288
holophylla 0.4138 0.2937 0.3436 286
lasiocarpa 0.7521 0.2898 0.4184 314
melongena 0.4367 0.5567 0.4894 291
micracantha 0.2349 0.2397 0.2373 292
petota 0.2148 0.6707 0.3254 328
solanum 0.1809 0.1184 0.1431 304
torva 0.0000 0.0000 0.0000 301
accuracy 0.3175 3600
macro avg 0.3620 0.3132 0.2745 3600
weighted avg 0.3608 0.3175 0.2758 3600
********************
* Confusion Matrix *
********************
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))
**************************************
* Train/Val Accuracy and Loss graphs *
**************************************
***************************
* Started at 6597.8125... *
***************************
Found 14400 validated image filenames belonging to 12 classes.
Found 3600 validated image filenames belonging to 12 classes.
Epoch 1/105
64/64 [==============================] - ETA: 0s - loss: 11.1766 - accuracy: 0.1152
Epoch 1: val_accuracy improved from -inf to 0.10625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_3.h5
64/64 [==============================] - 11s 133ms/step - loss: 11.1766 - accuracy: 0.1152 - val_loss: 43.4558 - val_accuracy: 0.1063
Epoch 2/105
64/64 [==============================] - ETA: 0s - loss: 3.6561 - accuracy: 0.1084
Epoch 2: val_accuracy did not improve from 0.10625
64/64 [==============================] - 7s 113ms/step - loss: 3.6561 - accuracy: 0.1084 - val_loss: 9.2981 - val_accuracy: 0.1000
Epoch 3/105
64/64 [==============================] - ETA: 0s - loss: 3.5347 - accuracy: 0.1338
Epoch 3: val_accuracy improved from 0.10625 to 0.16875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_3.h5
64/64 [==============================] - 8s 124ms/step - loss: 3.5347 - accuracy: 0.1338 - val_loss: 5.4612 - val_accuracy: 0.1688
Epoch 4/105
64/64 [==============================] - ETA: 0s - loss: 3.4930 - accuracy: 0.1357
Epoch 4: val_accuracy improved from 0.16875 to 0.18750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_3.h5
64/64 [==============================] - 8s 126ms/step - loss: 3.4930 - accuracy: 0.1357 - val_loss: 3.0134 - val_accuracy: 0.1875
Epoch 5/105
64/64 [==============================] - ETA: 0s - loss: 3.2009 - accuracy: 0.1250
Epoch 5: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 113ms/step - loss: 3.2009 - accuracy: 0.1250 - val_loss: 2.7365 - val_accuracy: 0.1375
Epoch 6/105
64/64 [==============================] - ETA: 0s - loss: 2.7497 - accuracy: 0.1416
Epoch 6: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 110ms/step - loss: 2.7497 - accuracy: 0.1416 - val_loss: 3.4174 - val_accuracy: 0.1437
Epoch 7/105
64/64 [==============================] - ETA: 0s - loss: 2.9649 - accuracy: 0.1602
Epoch 7: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 113ms/step - loss: 2.9649 - accuracy: 0.1602 - val_loss: 2.9283 - val_accuracy: 0.1312
Epoch 8/105
64/64 [==============================] - ETA: 0s - loss: 2.7077 - accuracy: 0.1709
Epoch 8: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 112ms/step - loss: 2.7077 - accuracy: 0.1709 - val_loss: 3.2074 - val_accuracy: 0.1562
Epoch 9/105
64/64 [==============================] - ETA: 0s - loss: 2.6437 - accuracy: 0.1729
Epoch 9: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 109ms/step - loss: 2.6437 - accuracy: 0.1729 - val_loss: 3.4065 - val_accuracy: 0.1187
Epoch 10/105
64/64 [==============================] - ETA: 0s - loss: 2.6017 - accuracy: 0.1973
Epoch 10: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 113ms/step - loss: 2.6017 - accuracy: 0.1973 - val_loss: 2.7921 - val_accuracy: 0.1562
Epoch 11/105
64/64 [==============================] - ETA: 0s - loss: 2.5681 - accuracy: 0.2002
Epoch 11: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 113ms/step - loss: 2.5681 - accuracy: 0.2002 - val_loss: 2.8528 - val_accuracy: 0.1562
Epoch 12/105
64/64 [==============================] - ETA: 0s - loss: 2.5178 - accuracy: 0.1895
Epoch 12: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 110ms/step - loss: 2.5178 - accuracy: 0.1895 - val_loss: 2.4302 - val_accuracy: 0.1562
Epoch 13/105
64/64 [==============================] - ETA: 0s - loss: 2.4702 - accuracy: 0.2070
Epoch 13: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 112ms/step - loss: 2.4702 - accuracy: 0.2070 - val_loss: 2.7332 - val_accuracy: 0.1187
Epoch 14/105
64/64 [==============================] - ETA: 0s - loss: 2.4257 - accuracy: 0.2090
Epoch 14: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 112ms/step - loss: 2.4257 - accuracy: 0.2090 - val_loss: 2.6699 - val_accuracy: 0.1562
Epoch 15/105
64/64 [==============================] - ETA: 0s - loss: 2.3387 - accuracy: 0.2275
Epoch 15: val_accuracy improved from 0.18750 to 0.21875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_3.h5
64/64 [==============================] - 8s 120ms/step - loss: 2.3387 - accuracy: 0.2275 - val_loss: 2.6659 - val_accuracy: 0.2188
Epoch 16/105
64/64 [==============================] - ETA: 0s - loss: 2.3468 - accuracy: 0.1963
Epoch 16: val_accuracy did not improve from 0.21875
64/64 [==============================] - 7s 113ms/step - loss: 2.3468 - accuracy: 0.1963 - val_loss: 2.7101 - val_accuracy: 0.2000
Epoch 17/105
64/64 [==============================] - ETA: 0s - loss: 2.3172 - accuracy: 0.2109
Epoch 17: val_accuracy did not improve from 0.21875
64/64 [==============================] - 7s 114ms/step - loss: 2.3172 - accuracy: 0.2109 - val_loss: 2.4767 - val_accuracy: 0.1937
Epoch 18/105
64/64 [==============================] - ETA: 0s - loss: 2.3300 - accuracy: 0.2051
Epoch 18: val_accuracy did not improve from 0.21875
64/64 [==============================] - 7s 110ms/step - loss: 2.3300 - accuracy: 0.2051 - val_loss: 2.7541 - val_accuracy: 0.1750
Epoch 19/105
64/64 [==============================] - ETA: 0s - loss: 2.2564 - accuracy: 0.2314
Epoch 19: val_accuracy improved from 0.21875 to 0.23125, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_3.h5
64/64 [==============================] - 8s 123ms/step - loss: 2.2564 - accuracy: 0.2314 - val_loss: 2.2739 - val_accuracy: 0.2313
Epoch 20/105
64/64 [==============================] - ETA: 0s - loss: 2.3363 - accuracy: 0.1982
Epoch 20: val_accuracy improved from 0.23125 to 0.25000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_3.h5
64/64 [==============================] - 8s 123ms/step - loss: 2.3363 - accuracy: 0.1982 - val_loss: 2.1122 - val_accuracy: 0.2500
Epoch 21/105
64/64 [==============================] - ETA: 0s - loss: 2.2874 - accuracy: 0.2236
Epoch 21: val_accuracy did not improve from 0.25000
64/64 [==============================] - 7s 111ms/step - loss: 2.2874 - accuracy: 0.2236 - val_loss: 2.3468 - val_accuracy: 0.1875
Epoch 22/105
64/64 [==============================] - ETA: 0s - loss: 2.1846 - accuracy: 0.2402
Epoch 22: val_accuracy improved from 0.25000 to 0.26250, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_3.h5
64/64 [==============================] - 8s 124ms/step - loss: 2.1846 - accuracy: 0.2402 - val_loss: 2.0886 - val_accuracy: 0.2625
Epoch 23/105
64/64 [==============================] - ETA: 0s - loss: 2.1934 - accuracy: 0.2148
Epoch 23: val_accuracy did not improve from 0.26250
64/64 [==============================] - 7s 113ms/step - loss: 2.1934 - accuracy: 0.2148 - val_loss: 2.2539 - val_accuracy: 0.2375
Epoch 24/105
64/64 [==============================] - ETA: 0s - loss: 2.2313 - accuracy: 0.2188
Epoch 24: val_accuracy did not improve from 0.26250
64/64 [==============================] - 7s 109ms/step - loss: 2.2313 - accuracy: 0.2188 - val_loss: 2.8617 - val_accuracy: 0.1187
Epoch 25/105
64/64 [==============================] - ETA: 0s - loss: 2.2268 - accuracy: 0.2236
Epoch 25: val_accuracy improved from 0.26250 to 0.33750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_3.h5
64/64 [==============================] - 8s 122ms/step - loss: 2.2268 - accuracy: 0.2236 - val_loss: 1.9329 - val_accuracy: 0.3375
Epoch 26/105
64/64 [==============================] - ETA: 0s - loss: 2.1691 - accuracy: 0.2432
Epoch 26: val_accuracy did not improve from 0.33750
64/64 [==============================] - 7s 111ms/step - loss: 2.1691 - accuracy: 0.2432 - val_loss: 1.9413 - val_accuracy: 0.3250
Epoch 27/105
64/64 [==============================] - ETA: 0s - loss: 2.1059 - accuracy: 0.2646
Epoch 27: val_accuracy did not improve from 0.33750
64/64 [==============================] - 7s 111ms/step - loss: 2.1059 - accuracy: 0.2646 - val_loss: 2.3781 - val_accuracy: 0.2125
Epoch 28/105
64/64 [==============================] - ETA: 0s - loss: 2.1711 - accuracy: 0.2334
Epoch 28: val_accuracy did not improve from 0.33750
64/64 [==============================] - 7s 112ms/step - loss: 2.1711 - accuracy: 0.2334 - val_loss: 2.0072 - val_accuracy: 0.2688
Epoch 29/105
64/64 [==============================] - ETA: 0s - loss: 2.1386 - accuracy: 0.2559
Epoch 29: val_accuracy did not improve from 0.33750
64/64 [==============================] - 7s 111ms/step - loss: 2.1386 - accuracy: 0.2559 - val_loss: 2.1186 - val_accuracy: 0.2562
Epoch 30/105
64/64 [==============================] - ETA: 0s - loss: 2.1060 - accuracy: 0.2637
Epoch 30: val_accuracy improved from 0.33750 to 0.35000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_3.h5
64/64 [==============================] - 8s 120ms/step - loss: 2.1060 - accuracy: 0.2637 - val_loss: 1.8518 - val_accuracy: 0.3500
Epoch 31/105
64/64 [==============================] - ETA: 0s - loss: 2.1088 - accuracy: 0.2510
Epoch 31: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 113ms/step - loss: 2.1088 - accuracy: 0.2510 - val_loss: 2.9913 - val_accuracy: 0.1937
Epoch 32/105
64/64 [==============================] - ETA: 0s - loss: 2.1702 - accuracy: 0.2588
Epoch 32: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 111ms/step - loss: 2.1702 - accuracy: 0.2588 - val_loss: 1.9306 - val_accuracy: 0.3125
Epoch 33/105
64/64 [==============================] - ETA: 0s - loss: 2.0957 - accuracy: 0.2598
Epoch 33: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 110ms/step - loss: 2.0957 - accuracy: 0.2598 - val_loss: 2.2717 - val_accuracy: 0.2625
Epoch 34/105
64/64 [==============================] - ETA: 0s - loss: 2.1003 - accuracy: 0.2578
Epoch 34: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 112ms/step - loss: 2.1003 - accuracy: 0.2578 - val_loss: 1.9517 - val_accuracy: 0.3500
Epoch 35/105
64/64 [==============================] - ETA: 0s - loss: 2.1274 - accuracy: 0.2617
Epoch 35: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 110ms/step - loss: 2.1274 - accuracy: 0.2617 - val_loss: 2.6244 - val_accuracy: 0.2062
Epoch 36/105
64/64 [==============================] - ETA: 0s - loss: 2.1173 - accuracy: 0.2617
Epoch 36: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 109ms/step - loss: 2.1173 - accuracy: 0.2617 - val_loss: 2.1521 - val_accuracy: 0.2562
Epoch 37/105
64/64 [==============================] - ETA: 0s - loss: 2.1531 - accuracy: 0.2646
Epoch 37: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 112ms/step - loss: 2.1531 - accuracy: 0.2646 - val_loss: 2.1594 - val_accuracy: 0.2625
Epoch 38/105
64/64 [==============================] - ETA: 0s - loss: 2.1526 - accuracy: 0.2686
Epoch 38: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 110ms/step - loss: 2.1526 - accuracy: 0.2686 - val_loss: 2.3501 - val_accuracy: 0.2625
Epoch 39/105
64/64 [==============================] - ETA: 0s - loss: 2.0852 - accuracy: 0.2695
Epoch 39: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 109ms/step - loss: 2.0852 - accuracy: 0.2695 - val_loss: 2.0005 - val_accuracy: 0.3313
Epoch 40/105
64/64 [==============================] - ETA: 0s - loss: 2.1205 - accuracy: 0.2559
Epoch 40: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 112ms/step - loss: 2.1205 - accuracy: 0.2559 - val_loss: 2.1799 - val_accuracy: 0.3250
Epoch 41/105
64/64 [==============================] - ETA: 0s - loss: 2.0945 - accuracy: 0.2656
Epoch 41: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 109ms/step - loss: 2.0945 - accuracy: 0.2656 - val_loss: 1.9478 - val_accuracy: 0.3125
Epoch 42/105
64/64 [==============================] - ETA: 0s - loss: 2.0707 - accuracy: 0.2852
Epoch 42: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 110ms/step - loss: 2.0707 - accuracy: 0.2852 - val_loss: 2.1393 - val_accuracy: 0.2438
Epoch 43/105
64/64 [==============================] - ETA: 0s - loss: 2.0212 - accuracy: 0.2568
Epoch 43: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 113ms/step - loss: 2.0212 - accuracy: 0.2568 - val_loss: 2.4299 - val_accuracy: 0.2688
Epoch 44/105
64/64 [==============================] - ETA: 0s - loss: 2.0899 - accuracy: 0.2568
Epoch 44: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 111ms/step - loss: 2.0899 - accuracy: 0.2568 - val_loss: 1.9229 - val_accuracy: 0.3000
Epoch 45/105
64/64 [==============================] - ETA: 0s - loss: 2.0571 - accuracy: 0.2793
Epoch 45: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 109ms/step - loss: 2.0571 - accuracy: 0.2793 - val_loss: 2.1407 - val_accuracy: 0.3125
Epoch 46/105
64/64 [==============================] - ETA: 0s - loss: 2.0801 - accuracy: 0.2773
Epoch 46: val_accuracy did not improve from 0.35000
64/64 [==============================] - 7s 112ms/step - loss: 2.0801 - accuracy: 0.2773 - val_loss: 1.8921 - val_accuracy: 0.3187
Epoch 47/105
64/64 [==============================] - ETA: 0s - loss: 2.0618 - accuracy: 0.2646
Epoch 47: val_accuracy improved from 0.35000 to 0.35625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_3.h5
64/64 [==============================] - 8s 119ms/step - loss: 2.0618 - accuracy: 0.2646 - val_loss: 1.8386 - val_accuracy: 0.3562
Epoch 48/105
64/64 [==============================] - ETA: 0s - loss: 2.0397 - accuracy: 0.2861
Epoch 48: val_accuracy did not improve from 0.35625
64/64 [==============================] - 7s 110ms/step - loss: 2.0397 - accuracy: 0.2861 - val_loss: 1.9780 - val_accuracy: 0.3063
Epoch 49/105
64/64 [==============================] - ETA: 0s - loss: 2.0289 - accuracy: 0.2803
Epoch 49: val_accuracy did not improve from 0.35625
64/64 [==============================] - 7s 113ms/step - loss: 2.0289 - accuracy: 0.2803 - val_loss: 2.1016 - val_accuracy: 0.2500
Epoch 50/105
64/64 [==============================] - ETA: 0s - loss: 2.0642 - accuracy: 0.2676
Epoch 50: val_accuracy did not improve from 0.35625
64/64 [==============================] - 7s 110ms/step - loss: 2.0642 - accuracy: 0.2676 - val_loss: 2.3822 - val_accuracy: 0.2125
Epoch 51/105
64/64 [==============================] - ETA: 0s - loss: 2.0214 - accuracy: 0.2783
Epoch 51: val_accuracy did not improve from 0.35625
64/64 [==============================] - 7s 110ms/step - loss: 2.0214 - accuracy: 0.2783 - val_loss: 2.0041 - val_accuracy: 0.2812
Epoch 52/105
64/64 [==============================] - ETA: 0s - loss: 2.1384 - accuracy: 0.2471
Epoch 52: val_accuracy did not improve from 0.35625
64/64 [==============================] - 7s 113ms/step - loss: 2.1384 - accuracy: 0.2471 - val_loss: 2.0910 - val_accuracy: 0.2438
Epoch 53/105
64/64 [==============================] - ETA: 0s - loss: 2.0261 - accuracy: 0.2871
Epoch 53: val_accuracy did not improve from 0.35625
64/64 [==============================] - 7s 112ms/step - loss: 2.0261 - accuracy: 0.2871 - val_loss: 2.1626 - val_accuracy: 0.2625
Epoch 54/105
64/64 [==============================] - ETA: 0s - loss: 2.0727 - accuracy: 0.2744
Epoch 54: val_accuracy did not improve from 0.35625
64/64 [==============================] - 7s 110ms/step - loss: 2.0727 - accuracy: 0.2744 - val_loss: 2.1077 - val_accuracy: 0.2812
Epoch 55/105
64/64 [==============================] - ETA: 0s - loss: 2.0129 - accuracy: 0.3066
Epoch 55: val_accuracy did not improve from 0.35625
64/64 [==============================] - 7s 115ms/step - loss: 2.0129 - accuracy: 0.3066 - val_loss: 1.9317 - val_accuracy: 0.3000
Epoch 56/105
64/64 [==============================] - ETA: 0s - loss: 2.0506 - accuracy: 0.2695
Epoch 56: val_accuracy did not improve from 0.35625
64/64 [==============================] - 7s 111ms/step - loss: 2.0506 - accuracy: 0.2695 - val_loss: 1.8144 - val_accuracy: 0.3438
Epoch 57/105
64/64 [==============================] - ETA: 0s - loss: 1.9901 - accuracy: 0.3174
Epoch 57: val_accuracy improved from 0.35625 to 0.40000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_3.h5
64/64 [==============================] - 8s 121ms/step - loss: 1.9901 - accuracy: 0.3174 - val_loss: 1.6739 - val_accuracy: 0.4000
Epoch 58/105
64/64 [==============================] - ETA: 0s - loss: 2.0707 - accuracy: 0.2725
Epoch 58: val_accuracy did not improve from 0.40000
64/64 [==============================] - 7s 108ms/step - loss: 2.0707 - accuracy: 0.2725 - val_loss: 2.0424 - val_accuracy: 0.3187
Epoch 59/105
64/64 [==============================] - ETA: 0s - loss: 2.0268 - accuracy: 0.2832
Epoch 59: val_accuracy did not improve from 0.40000
64/64 [==============================] - 7s 108ms/step - loss: 2.0268 - accuracy: 0.2832 - val_loss: 1.8625 - val_accuracy: 0.3438
Epoch 60/105
64/64 [==============================] - ETA: 0s - loss: 2.0100 - accuracy: 0.2705
Epoch 60: val_accuracy did not improve from 0.40000
64/64 [==============================] - 7s 107ms/step - loss: 2.0100 - accuracy: 0.2705 - val_loss: 2.8113 - val_accuracy: 0.2562
Epoch 61/105
64/64 [==============================] - ETA: 0s - loss: 2.0457 - accuracy: 0.2979
Epoch 61: val_accuracy did not improve from 0.40000
64/64 [==============================] - 7s 109ms/step - loss: 2.0457 - accuracy: 0.2979 - val_loss: 2.1269 - val_accuracy: 0.3000
Epoch 62/105
64/64 [==============================] - ETA: 0s - loss: 2.0108 - accuracy: 0.3047
Epoch 62: val_accuracy did not improve from 0.40000
64/64 [==============================] - 7s 108ms/step - loss: 2.0108 - accuracy: 0.3047 - val_loss: 2.0911 - val_accuracy: 0.2812
Epoch 63/105
64/64 [==============================] - ETA: 0s - loss: 1.9669 - accuracy: 0.2988
Epoch 63: val_accuracy did not improve from 0.40000
64/64 [==============================] - 7s 108ms/step - loss: 1.9669 - accuracy: 0.2988 - val_loss: 1.8431 - val_accuracy: 0.3375
Epoch 64/105
64/64 [==============================] - ETA: 0s - loss: 2.0496 - accuracy: 0.2734
Epoch 64: val_accuracy did not improve from 0.40000
64/64 [==============================] - 7s 108ms/step - loss: 2.0496 - accuracy: 0.2734 - val_loss: 2.2768 - val_accuracy: 0.2313
Epoch 65/105
64/64 [==============================] - ETA: 0s - loss: 1.9811 - accuracy: 0.2969
Epoch 65: val_accuracy did not improve from 0.40000
64/64 [==============================] - 7s 108ms/step - loss: 1.9811 - accuracy: 0.2969 - val_loss: 1.7741 - val_accuracy: 0.3750
Epoch 66/105
64/64 [==============================] - ETA: 0s - loss: 2.0101 - accuracy: 0.2871
Epoch 66: val_accuracy did not improve from 0.40000
64/64 [==============================] - 7s 108ms/step - loss: 2.0101 - accuracy: 0.2871 - val_loss: 1.8571 - val_accuracy: 0.3313
Epoch 67/105
64/64 [==============================] - ETA: 0s - loss: 1.9930 - accuracy: 0.2949
Epoch 67: val_accuracy improved from 0.40000 to 0.43125, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_3.h5
64/64 [==============================] - 8s 118ms/step - loss: 1.9930 - accuracy: 0.2949 - val_loss: 1.6616 - val_accuracy: 0.4313
Epoch 68/105
64/64 [==============================] - ETA: 0s - loss: 2.0027 - accuracy: 0.2705
Epoch 68: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 109ms/step - loss: 2.0027 - accuracy: 0.2705 - val_loss: 1.6820 - val_accuracy: 0.4062
Epoch 69/105
64/64 [==============================] - ETA: 0s - loss: 2.0051 - accuracy: 0.3018
Epoch 69: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 111ms/step - loss: 2.0051 - accuracy: 0.3018 - val_loss: 2.0241 - val_accuracy: 0.2875
Epoch 70/105
64/64 [==============================] - ETA: 0s - loss: 1.9380 - accuracy: 0.3154
Epoch 70: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 112ms/step - loss: 1.9380 - accuracy: 0.3154 - val_loss: 1.8149 - val_accuracy: 0.4125
Epoch 71/105
64/64 [==============================] - ETA: 0s - loss: 2.0036 - accuracy: 0.3008
Epoch 71: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 109ms/step - loss: 2.0036 - accuracy: 0.3008 - val_loss: 1.8600 - val_accuracy: 0.3250
Epoch 72/105
64/64 [==============================] - ETA: 0s - loss: 2.0047 - accuracy: 0.2969
Epoch 72: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 110ms/step - loss: 2.0047 - accuracy: 0.2969 - val_loss: 2.2350 - val_accuracy: 0.2313
Epoch 73/105
64/64 [==============================] - ETA: 0s - loss: 2.0048 - accuracy: 0.3076
Epoch 73: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 113ms/step - loss: 2.0048 - accuracy: 0.3076 - val_loss: 1.9028 - val_accuracy: 0.3125
Epoch 74/105
64/64 [==============================] - ETA: 0s - loss: 1.9865 - accuracy: 0.2979
Epoch 74: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 110ms/step - loss: 1.9865 - accuracy: 0.2979 - val_loss: 1.8901 - val_accuracy: 0.2937
Epoch 75/105
64/64 [==============================] - ETA: 0s - loss: 1.9857 - accuracy: 0.2900
Epoch 75: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 111ms/step - loss: 1.9857 - accuracy: 0.2900 - val_loss: 2.5079 - val_accuracy: 0.2125
Epoch 76/105
64/64 [==============================] - ETA: 0s - loss: 2.0299 - accuracy: 0.2998
Epoch 76: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 113ms/step - loss: 2.0299 - accuracy: 0.2998 - val_loss: 1.9009 - val_accuracy: 0.3438
Epoch 77/105
64/64 [==============================] - ETA: 0s - loss: 1.9742 - accuracy: 0.2920
Epoch 77: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 111ms/step - loss: 1.9742 - accuracy: 0.2920 - val_loss: 1.9686 - val_accuracy: 0.3250
Epoch 78/105
64/64 [==============================] - ETA: 0s - loss: 1.9757 - accuracy: 0.2871
Epoch 78: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 110ms/step - loss: 1.9757 - accuracy: 0.2871 - val_loss: 1.7540 - val_accuracy: 0.4000
Epoch 79/105
64/64 [==============================] - ETA: 0s - loss: 2.0105 - accuracy: 0.3027
Epoch 79: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 113ms/step - loss: 2.0105 - accuracy: 0.3027 - val_loss: 1.7444 - val_accuracy: 0.3938
Epoch 80/105
64/64 [==============================] - ETA: 0s - loss: 1.9518 - accuracy: 0.3125
Epoch 80: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 110ms/step - loss: 1.9518 - accuracy: 0.3125 - val_loss: 2.1504 - val_accuracy: 0.3250
Epoch 81/105
64/64 [==============================] - ETA: 0s - loss: 2.0446 - accuracy: 0.2744
Epoch 81: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 111ms/step - loss: 2.0446 - accuracy: 0.2744 - val_loss: 2.2355 - val_accuracy: 0.2500
Epoch 82/105
64/64 [==============================] - ETA: 0s - loss: 1.9738 - accuracy: 0.3057
Epoch 82: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 112ms/step - loss: 1.9738 - accuracy: 0.3057 - val_loss: 2.0507 - val_accuracy: 0.2812
Epoch 83/105
64/64 [==============================] - ETA: 0s - loss: 1.9975 - accuracy: 0.3037
Epoch 83: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 110ms/step - loss: 1.9975 - accuracy: 0.3037 - val_loss: 2.0518 - val_accuracy: 0.2688
Epoch 84/105
64/64 [==============================] - ETA: 0s - loss: 1.9553 - accuracy: 0.2949
Epoch 84: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 110ms/step - loss: 1.9553 - accuracy: 0.2949 - val_loss: 2.2014 - val_accuracy: 0.2875
Epoch 85/105
64/64 [==============================] - ETA: 0s - loss: 1.9551 - accuracy: 0.3232
Epoch 85: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 113ms/step - loss: 1.9551 - accuracy: 0.3232 - val_loss: 2.5504 - val_accuracy: 0.2375
Epoch 86/105
64/64 [==============================] - ETA: 0s - loss: 1.9649 - accuracy: 0.3008
Epoch 86: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 110ms/step - loss: 1.9649 - accuracy: 0.3008 - val_loss: 1.8823 - val_accuracy: 0.3500
Epoch 87/105
64/64 [==============================] - ETA: 0s - loss: 2.0089 - accuracy: 0.2988
Epoch 87: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 110ms/step - loss: 2.0089 - accuracy: 0.2988 - val_loss: 2.0038 - val_accuracy: 0.3063
Epoch 88/105
64/64 [==============================] - ETA: 0s - loss: 1.9362 - accuracy: 0.3105
Epoch 88: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 113ms/step - loss: 1.9362 - accuracy: 0.3105 - val_loss: 1.9549 - val_accuracy: 0.3375
Epoch 89/105
64/64 [==============================] - ETA: 0s - loss: 1.9741 - accuracy: 0.2939
Epoch 89: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 111ms/step - loss: 1.9741 - accuracy: 0.2939 - val_loss: 1.7678 - val_accuracy: 0.4062
Epoch 90/105
64/64 [==============================] - ETA: 0s - loss: 2.0210 - accuracy: 0.2959
Epoch 90: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 110ms/step - loss: 2.0210 - accuracy: 0.2959 - val_loss: 1.9524 - val_accuracy: 0.3313
Epoch 91/105
64/64 [==============================] - ETA: 0s - loss: 1.9242 - accuracy: 0.3340
Epoch 91: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 113ms/step - loss: 1.9242 - accuracy: 0.3340 - val_loss: 1.7315 - val_accuracy: 0.3750
Epoch 92/105
64/64 [==============================] - ETA: 0s - loss: 1.9023 - accuracy: 0.3311
Epoch 92: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 111ms/step - loss: 1.9023 - accuracy: 0.3311 - val_loss: 2.0749 - val_accuracy: 0.2875
Epoch 93/105
64/64 [==============================] - ETA: 0s - loss: 1.9381 - accuracy: 0.3242
Epoch 93: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 110ms/step - loss: 1.9381 - accuracy: 0.3242 - val_loss: 1.7280 - val_accuracy: 0.3938
Epoch 94/105
64/64 [==============================] - ETA: 0s - loss: 1.9719 - accuracy: 0.3027
Epoch 94: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 112ms/step - loss: 1.9719 - accuracy: 0.3027 - val_loss: 1.8577 - val_accuracy: 0.3375
Epoch 95/105
64/64 [==============================] - ETA: 0s - loss: 1.9168 - accuracy: 0.3174
Epoch 95: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 110ms/step - loss: 1.9168 - accuracy: 0.3174 - val_loss: 2.0917 - val_accuracy: 0.3187
Epoch 96/105
64/64 [==============================] - ETA: 0s - loss: 1.9590 - accuracy: 0.3232
Epoch 96: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 110ms/step - loss: 1.9590 - accuracy: 0.3232 - val_loss: 2.3261 - val_accuracy: 0.2625
Epoch 97/105
64/64 [==============================] - ETA: 0s - loss: 1.9481 - accuracy: 0.3115
Epoch 97: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 113ms/step - loss: 1.9481 - accuracy: 0.3115 - val_loss: 2.0445 - val_accuracy: 0.3000
Epoch 98/105
64/64 [==============================] - ETA: 0s - loss: 1.9976 - accuracy: 0.3057
Epoch 98: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 111ms/step - loss: 1.9976 - accuracy: 0.3057 - val_loss: 2.1486 - val_accuracy: 0.2688
Epoch 99/105
64/64 [==============================] - ETA: 0s - loss: 1.8851 - accuracy: 0.3252
Epoch 99: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 110ms/step - loss: 1.8851 - accuracy: 0.3252 - val_loss: 2.1356 - val_accuracy: 0.3063
Epoch 100/105
64/64 [==============================] - ETA: 0s - loss: 1.9671 - accuracy: 0.3164
Epoch 100: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 114ms/step - loss: 1.9671 - accuracy: 0.3164 - val_loss: 1.8085 - val_accuracy: 0.3750
Epoch 101/105
64/64 [==============================] - ETA: 0s - loss: 1.9481 - accuracy: 0.3105
Epoch 101: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 110ms/step - loss: 1.9481 - accuracy: 0.3105 - val_loss: 1.8217 - val_accuracy: 0.3187
Epoch 102/105
64/64 [==============================] - ETA: 0s - loss: 1.9560 - accuracy: 0.3066
Epoch 102: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 110ms/step - loss: 1.9560 - accuracy: 0.3066 - val_loss: 2.1692 - val_accuracy: 0.2562
Epoch 103/105
64/64 [==============================] - ETA: 0s - loss: 1.9232 - accuracy: 0.3262
Epoch 103: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 112ms/step - loss: 1.9232 - accuracy: 0.3262 - val_loss: 1.9074 - val_accuracy: 0.3438
Epoch 104/105
64/64 [==============================] - ETA: 0s - loss: 1.9298 - accuracy: 0.3164
Epoch 104: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 110ms/step - loss: 1.9298 - accuracy: 0.3164 - val_loss: 1.6815 - val_accuracy: 0.4000
Epoch 105/105
64/64 [==============================] - ETA: 0s - loss: 1.9441 - accuracy: 0.3018
Epoch 105: val_accuracy did not improve from 0.43125
64/64 [==============================] - 7s 111ms/step - loss: 1.9441 - accuracy: 0.3018 - val_loss: 1.9913 - val_accuracy: 0.2750
********* Training time: 1258.078125 s.
*****************
* Model Summary *
*****************
Model: "resnet50_imagenet"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
resnet50 (Functional) (None, 2048) 23587712
flatten_6 (Flatten) (None, 2048) 0
dense_6 (Dense) (None, 12) 24588
=================================================================
Total params: 23,612,300
Trainable params: 4,494,348
Non-trainable params: 19,117,952
_________________________________________________________________
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 3600 validated image filenames belonging to 12 classes.
225/225 [==============================] - 7s 28ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.5593 0.3536 0.4333 280
anarrhichomenum 0.3430 0.6301 0.4442 319
brevantherum 0.2435 0.5243 0.3325 267
dulcamara 0.2500 0.0032 0.0063 315
herposolanum 0.1957 0.0300 0.0520 300
holophylla 0.3025 0.2537 0.2760 335
lasiocarpa 0.7102 0.4045 0.5155 309
melongena 0.5920 0.4175 0.4897 285
micracantha 0.1489 0.0238 0.0411 294
petota 0.1819 0.7209 0.2905 301
solanum 0.1333 0.0736 0.0948 299
torva 0.2483 0.1250 0.1663 296
accuracy 0.2950 3600
macro avg 0.3257 0.2967 0.2618 3600
weighted avg 0.3250 0.2950 0.2605 3600
********************
* Confusion Matrix *
********************
************************************** * Train/Val Accuracy and Loss graphs * **************************************
**************************
* Started at 7869.375... *
**************************
Found 14400 validated image filenames belonging to 12 classes.
Found 3600 validated image filenames belonging to 12 classes.
Epoch 1/105
64/64 [==============================] - ETA: 0s - loss: 10.8682 - accuracy: 0.1182
Epoch 1: val_accuracy improved from -inf to 0.10625, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_4.h5
64/64 [==============================] - 11s 131ms/step - loss: 10.8682 - accuracy: 0.1182 - val_loss: 27.3072 - val_accuracy: 0.1063
Epoch 2/105
64/64 [==============================] - ETA: 0s - loss: 3.4741 - accuracy: 0.1201
Epoch 2: val_accuracy improved from 0.10625 to 0.12500, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_4.h5
64/64 [==============================] - 8s 123ms/step - loss: 3.4741 - accuracy: 0.1201 - val_loss: 7.4388 - val_accuracy: 0.1250
Epoch 3/105
64/64 [==============================] - ETA: 0s - loss: 3.5119 - accuracy: 0.1113
Epoch 3: val_accuracy did not improve from 0.12500
64/64 [==============================] - 7s 113ms/step - loss: 3.5119 - accuracy: 0.1113 - val_loss: 4.0268 - val_accuracy: 0.0812
Epoch 4/105
64/64 [==============================] - ETA: 0s - loss: 3.0805 - accuracy: 0.1562
Epoch 4: val_accuracy improved from 0.12500 to 0.15000, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_4.h5
64/64 [==============================] - 8s 120ms/step - loss: 3.0805 - accuracy: 0.1562 - val_loss: 2.7984 - val_accuracy: 0.1500
Epoch 5/105
64/64 [==============================] - ETA: 0s - loss: 3.0443 - accuracy: 0.1562
Epoch 5: val_accuracy improved from 0.15000 to 0.16875, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_4.h5
64/64 [==============================] - 8s 126ms/step - loss: 3.0443 - accuracy: 0.1562 - val_loss: 3.3048 - val_accuracy: 0.1688
Epoch 6/105
64/64 [==============================] - ETA: 0s - loss: 2.9945 - accuracy: 0.1533
Epoch 6: val_accuracy improved from 0.16875 to 0.17500, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_4.h5
64/64 [==============================] - 8s 122ms/step - loss: 2.9945 - accuracy: 0.1533 - val_loss: 2.7783 - val_accuracy: 0.1750
Epoch 7/105
64/64 [==============================] - ETA: 0s - loss: 2.8061 - accuracy: 0.1797
Epoch 7: val_accuracy improved from 0.17500 to 0.18750, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_4.h5
64/64 [==============================] - 8s 123ms/step - loss: 2.8061 - accuracy: 0.1797 - val_loss: 3.0053 - val_accuracy: 0.1875
Epoch 8/105
64/64 [==============================] - ETA: 0s - loss: 2.7341 - accuracy: 0.1572
Epoch 8: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 113ms/step - loss: 2.7341 - accuracy: 0.1572 - val_loss: 2.9684 - val_accuracy: 0.1625
Epoch 9/105
64/64 [==============================] - ETA: 0s - loss: 2.7743 - accuracy: 0.1543
Epoch 9: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 113ms/step - loss: 2.7743 - accuracy: 0.1543 - val_loss: 2.5333 - val_accuracy: 0.1500
Epoch 10/105
64/64 [==============================] - ETA: 0s - loss: 2.5824 - accuracy: 0.1738
Epoch 10: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 111ms/step - loss: 2.5824 - accuracy: 0.1738 - val_loss: 2.7350 - val_accuracy: 0.1813
Epoch 11/105
64/64 [==============================] - ETA: 0s - loss: 2.6452 - accuracy: 0.1758
Epoch 11: val_accuracy did not improve from 0.18750
64/64 [==============================] - 7s 113ms/step - loss: 2.6452 - accuracy: 0.1758 - val_loss: 2.9581 - val_accuracy: 0.1437
Epoch 12/105
64/64 [==============================] - ETA: 0s - loss: 2.3938 - accuracy: 0.2207
Epoch 12: val_accuracy improved from 0.18750 to 0.19375, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_4.h5
64/64 [==============================] - 8s 122ms/step - loss: 2.3938 - accuracy: 0.2207 - val_loss: 2.5608 - val_accuracy: 0.1937
Epoch 13/105
64/64 [==============================] - ETA: 0s - loss: 2.3898 - accuracy: 0.1807
Epoch 13: val_accuracy improved from 0.19375 to 0.26250, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_4.h5
64/64 [==============================] - 8s 120ms/step - loss: 2.3898 - accuracy: 0.1807 - val_loss: 2.2575 - val_accuracy: 0.2625
Epoch 14/105
64/64 [==============================] - ETA: 0s - loss: 2.4357 - accuracy: 0.1943
Epoch 14: val_accuracy did not improve from 0.26250
64/64 [==============================] - 7s 110ms/step - loss: 2.4357 - accuracy: 0.1943 - val_loss: 2.7509 - val_accuracy: 0.1937
Epoch 15/105
64/64 [==============================] - ETA: 0s - loss: 2.3614 - accuracy: 0.1787
Epoch 15: val_accuracy did not improve from 0.26250
64/64 [==============================] - 7s 112ms/step - loss: 2.3614 - accuracy: 0.1787 - val_loss: 2.4005 - val_accuracy: 0.2188
Epoch 16/105
64/64 [==============================] - ETA: 0s - loss: 2.3143 - accuracy: 0.2129
Epoch 16: val_accuracy did not improve from 0.26250
64/64 [==============================] - 7s 110ms/step - loss: 2.3143 - accuracy: 0.2129 - val_loss: 2.9854 - val_accuracy: 0.1375
Epoch 17/105
64/64 [==============================] - ETA: 0s - loss: 2.2797 - accuracy: 0.2041
Epoch 17: val_accuracy did not improve from 0.26250
64/64 [==============================] - 7s 113ms/step - loss: 2.2797 - accuracy: 0.2041 - val_loss: 2.4325 - val_accuracy: 0.1750
Epoch 18/105
64/64 [==============================] - ETA: 0s - loss: 2.2822 - accuracy: 0.2119
Epoch 18: val_accuracy did not improve from 0.26250
64/64 [==============================] - 7s 112ms/step - loss: 2.2822 - accuracy: 0.2119 - val_loss: 2.4201 - val_accuracy: 0.2250
Epoch 19/105
64/64 [==============================] - ETA: 0s - loss: 2.3200 - accuracy: 0.2012
Epoch 19: val_accuracy did not improve from 0.26250
64/64 [==============================] - 7s 111ms/step - loss: 2.3200 - accuracy: 0.2012 - val_loss: 2.7219 - val_accuracy: 0.2000
Epoch 20/105
64/64 [==============================] - ETA: 0s - loss: 2.2928 - accuracy: 0.2227
Epoch 20: val_accuracy did not improve from 0.26250
64/64 [==============================] - 7s 113ms/step - loss: 2.2928 - accuracy: 0.2227 - val_loss: 2.5263 - val_accuracy: 0.2125
Epoch 21/105
64/64 [==============================] - ETA: 0s - loss: 2.2177 - accuracy: 0.2188
Epoch 21: val_accuracy did not improve from 0.26250
64/64 [==============================] - 7s 112ms/step - loss: 2.2177 - accuracy: 0.2188 - val_loss: 2.5407 - val_accuracy: 0.1562
Epoch 22/105
64/64 [==============================] - ETA: 0s - loss: 2.2042 - accuracy: 0.2188
Epoch 22: val_accuracy did not improve from 0.26250
64/64 [==============================] - 7s 111ms/step - loss: 2.2042 - accuracy: 0.2188 - val_loss: 2.2118 - val_accuracy: 0.2438
Epoch 23/105
64/64 [==============================] - ETA: 0s - loss: 2.2075 - accuracy: 0.2129
Epoch 23: val_accuracy improved from 0.26250 to 0.32500, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_4.h5
64/64 [==============================] - 8s 123ms/step - loss: 2.2075 - accuracy: 0.2129 - val_loss: 2.1733 - val_accuracy: 0.3250
Epoch 24/105
64/64 [==============================] - ETA: 0s - loss: 2.1866 - accuracy: 0.2412
Epoch 24: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 113ms/step - loss: 2.1866 - accuracy: 0.2412 - val_loss: 2.1984 - val_accuracy: 0.2250
Epoch 25/105
64/64 [==============================] - ETA: 0s - loss: 2.1947 - accuracy: 0.2314
Epoch 25: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 110ms/step - loss: 2.1947 - accuracy: 0.2314 - val_loss: 2.4856 - val_accuracy: 0.1813
Epoch 26/105
64/64 [==============================] - ETA: 0s - loss: 2.1538 - accuracy: 0.2490
Epoch 26: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 113ms/step - loss: 2.1538 - accuracy: 0.2490 - val_loss: 2.3231 - val_accuracy: 0.2250
Epoch 27/105
64/64 [==============================] - ETA: 0s - loss: 2.1556 - accuracy: 0.2480
Epoch 27: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 114ms/step - loss: 2.1556 - accuracy: 0.2480 - val_loss: 2.4166 - val_accuracy: 0.1875
Epoch 28/105
64/64 [==============================] - ETA: 0s - loss: 2.1622 - accuracy: 0.2402
Epoch 28: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 111ms/step - loss: 2.1622 - accuracy: 0.2402 - val_loss: 2.8099 - val_accuracy: 0.1250
Epoch 29/105
64/64 [==============================] - ETA: 0s - loss: 2.1635 - accuracy: 0.2363
Epoch 29: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 113ms/step - loss: 2.1635 - accuracy: 0.2363 - val_loss: 2.3098 - val_accuracy: 0.1875
Epoch 30/105
64/64 [==============================] - ETA: 0s - loss: 2.1821 - accuracy: 0.2256
Epoch 30: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 111ms/step - loss: 2.1821 - accuracy: 0.2256 - val_loss: 2.0923 - val_accuracy: 0.2688
Epoch 31/105
64/64 [==============================] - ETA: 0s - loss: 2.1271 - accuracy: 0.2480
Epoch 31: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 111ms/step - loss: 2.1271 - accuracy: 0.2480 - val_loss: 2.0655 - val_accuracy: 0.3250
Epoch 32/105
64/64 [==============================] - ETA: 0s - loss: 2.1230 - accuracy: 0.2549
Epoch 32: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 112ms/step - loss: 2.1230 - accuracy: 0.2549 - val_loss: 2.2836 - val_accuracy: 0.2313
Epoch 33/105
64/64 [==============================] - ETA: 0s - loss: 2.1074 - accuracy: 0.2666
Epoch 33: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 114ms/step - loss: 2.1074 - accuracy: 0.2666 - val_loss: 2.6240 - val_accuracy: 0.1813
Epoch 34/105
64/64 [==============================] - ETA: 0s - loss: 2.1653 - accuracy: 0.2598
Epoch 34: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 110ms/step - loss: 2.1653 - accuracy: 0.2598 - val_loss: 2.2683 - val_accuracy: 0.2625
Epoch 35/105
64/64 [==============================] - ETA: 0s - loss: 2.1643 - accuracy: 0.2529
Epoch 35: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 113ms/step - loss: 2.1643 - accuracy: 0.2529 - val_loss: 2.2263 - val_accuracy: 0.2562
Epoch 36/105
64/64 [==============================] - ETA: 0s - loss: 2.0892 - accuracy: 0.2783
Epoch 36: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 113ms/step - loss: 2.0892 - accuracy: 0.2783 - val_loss: 2.0457 - val_accuracy: 0.3187
Epoch 37/105
64/64 [==============================] - ETA: 0s - loss: 2.1103 - accuracy: 0.2529
Epoch 37: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 112ms/step - loss: 2.1103 - accuracy: 0.2529 - val_loss: 2.1039 - val_accuracy: 0.2500
Epoch 38/105
64/64 [==============================] - ETA: 0s - loss: 2.1531 - accuracy: 0.2500
Epoch 38: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 115ms/step - loss: 2.1531 - accuracy: 0.2500 - val_loss: 2.2437 - val_accuracy: 0.2875
Epoch 39/105
64/64 [==============================] - ETA: 0s - loss: 2.1232 - accuracy: 0.2422
Epoch 39: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 114ms/step - loss: 2.1232 - accuracy: 0.2422 - val_loss: 2.7091 - val_accuracy: 0.1625
Epoch 40/105
64/64 [==============================] - ETA: 0s - loss: 2.0978 - accuracy: 0.2471
Epoch 40: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 111ms/step - loss: 2.0978 - accuracy: 0.2471 - val_loss: 2.0094 - val_accuracy: 0.3187
Epoch 41/105
64/64 [==============================] - ETA: 0s - loss: 2.0968 - accuracy: 0.2705
Epoch 41: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 114ms/step - loss: 2.0968 - accuracy: 0.2705 - val_loss: 2.0660 - val_accuracy: 0.2250
Epoch 42/105
64/64 [==============================] - ETA: 0s - loss: 2.0456 - accuracy: 0.2881
Epoch 42: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 113ms/step - loss: 2.0456 - accuracy: 0.2881 - val_loss: 2.3217 - val_accuracy: 0.2688
Epoch 43/105
64/64 [==============================] - ETA: 0s - loss: 2.1143 - accuracy: 0.2451
Epoch 43: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 112ms/step - loss: 2.1143 - accuracy: 0.2451 - val_loss: 2.3168 - val_accuracy: 0.2438
Epoch 44/105
64/64 [==============================] - ETA: 0s - loss: 2.0516 - accuracy: 0.2764
Epoch 44: val_accuracy did not improve from 0.32500
64/64 [==============================] - 7s 114ms/step - loss: 2.0516 - accuracy: 0.2764 - val_loss: 2.5059 - val_accuracy: 0.2438
Epoch 45/105
64/64 [==============================] - ETA: 0s - loss: 2.0698 - accuracy: 0.2744
Epoch 45: val_accuracy improved from 0.32500 to 0.38125, saving model to P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\resnet50_tl_20230216202451_4.h5
64/64 [==============================] - 8s 121ms/step - loss: 2.0698 - accuracy: 0.2744 - val_loss: 1.8414 - val_accuracy: 0.3812
Epoch 46/105
64/64 [==============================] - ETA: 0s - loss: 2.0698 - accuracy: 0.2803
Epoch 46: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 111ms/step - loss: 2.0698 - accuracy: 0.2803 - val_loss: 2.4949 - val_accuracy: 0.1937
Epoch 47/105
64/64 [==============================] - ETA: 0s - loss: 2.0292 - accuracy: 0.2646
Epoch 47: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 113ms/step - loss: 2.0292 - accuracy: 0.2646 - val_loss: 2.3341 - val_accuracy: 0.2875
Epoch 48/105
64/64 [==============================] - ETA: 0s - loss: 2.0339 - accuracy: 0.2744
Epoch 48: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 111ms/step - loss: 2.0339 - accuracy: 0.2744 - val_loss: 2.2678 - val_accuracy: 0.2750
Epoch 49/105
64/64 [==============================] - ETA: 0s - loss: 2.1082 - accuracy: 0.2471
Epoch 49: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 111ms/step - loss: 2.1082 - accuracy: 0.2471 - val_loss: 2.0114 - val_accuracy: 0.3500
Epoch 50/105
64/64 [==============================] - ETA: 0s - loss: 2.1141 - accuracy: 0.2539
Epoch 50: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 113ms/step - loss: 2.1141 - accuracy: 0.2539 - val_loss: 2.8449 - val_accuracy: 0.2688
Epoch 51/105
64/64 [==============================] - ETA: 0s - loss: 2.0665 - accuracy: 0.2842
Epoch 51: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 111ms/step - loss: 2.0665 - accuracy: 0.2842 - val_loss: 2.2992 - val_accuracy: 0.2188
Epoch 52/105
64/64 [==============================] - ETA: 0s - loss: 1.9940 - accuracy: 0.2920
Epoch 52: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 110ms/step - loss: 1.9940 - accuracy: 0.2920 - val_loss: 2.0285 - val_accuracy: 0.2625
Epoch 53/105
64/64 [==============================] - ETA: 0s - loss: 2.0422 - accuracy: 0.2744
Epoch 53: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 112ms/step - loss: 2.0422 - accuracy: 0.2744 - val_loss: 2.5333 - val_accuracy: 0.2438
Epoch 54/105
64/64 [==============================] - ETA: 0s - loss: 2.0237 - accuracy: 0.2949
Epoch 54: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 111ms/step - loss: 2.0237 - accuracy: 0.2949 - val_loss: 2.1719 - val_accuracy: 0.2750
Epoch 55/105
64/64 [==============================] - ETA: 0s - loss: 2.0280 - accuracy: 0.2988
Epoch 55: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 111ms/step - loss: 2.0280 - accuracy: 0.2988 - val_loss: 2.1162 - val_accuracy: 0.3000
Epoch 56/105
64/64 [==============================] - ETA: 0s - loss: 2.0238 - accuracy: 0.2939
Epoch 56: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 113ms/step - loss: 2.0238 - accuracy: 0.2939 - val_loss: 1.9592 - val_accuracy: 0.3187
Epoch 57/105
64/64 [==============================] - ETA: 0s - loss: 2.0376 - accuracy: 0.3154
Epoch 57: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 112ms/step - loss: 2.0376 - accuracy: 0.3154 - val_loss: 2.0958 - val_accuracy: 0.2562
Epoch 58/105
64/64 [==============================] - ETA: 0s - loss: 2.0085 - accuracy: 0.2979
Epoch 58: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 110ms/step - loss: 2.0085 - accuracy: 0.2979 - val_loss: 2.1602 - val_accuracy: 0.2750
Epoch 59/105
64/64 [==============================] - ETA: 0s - loss: 2.0016 - accuracy: 0.2822
Epoch 59: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 113ms/step - loss: 2.0016 - accuracy: 0.2822 - val_loss: 2.1826 - val_accuracy: 0.3063
Epoch 60/105
64/64 [==============================] - ETA: 0s - loss: 1.9887 - accuracy: 0.3047
Epoch 60: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 111ms/step - loss: 1.9887 - accuracy: 0.3047 - val_loss: 2.2969 - val_accuracy: 0.2500
Epoch 61/105
64/64 [==============================] - ETA: 0s - loss: 1.9692 - accuracy: 0.3135
Epoch 61: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 110ms/step - loss: 1.9692 - accuracy: 0.3135 - val_loss: 2.0583 - val_accuracy: 0.2812
Epoch 62/105
64/64 [==============================] - ETA: 0s - loss: 2.0474 - accuracy: 0.2725
Epoch 62: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 113ms/step - loss: 2.0474 - accuracy: 0.2725 - val_loss: 2.1223 - val_accuracy: 0.2438
Epoch 63/105
64/64 [==============================] - ETA: 0s - loss: 2.0283 - accuracy: 0.2764
Epoch 63: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 111ms/step - loss: 2.0283 - accuracy: 0.2764 - val_loss: 2.1590 - val_accuracy: 0.2937
Epoch 64/105
64/64 [==============================] - ETA: 0s - loss: 2.0425 - accuracy: 0.2812
Epoch 64: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 110ms/step - loss: 2.0425 - accuracy: 0.2812 - val_loss: 2.0168 - val_accuracy: 0.3313
Epoch 65/105
64/64 [==============================] - ETA: 0s - loss: 2.0154 - accuracy: 0.2871
Epoch 65: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 112ms/step - loss: 2.0154 - accuracy: 0.2871 - val_loss: 2.0075 - val_accuracy: 0.3187
Epoch 66/105
64/64 [==============================] - ETA: 0s - loss: 1.9926 - accuracy: 0.3008
Epoch 66: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 111ms/step - loss: 1.9926 - accuracy: 0.3008 - val_loss: 2.0538 - val_accuracy: 0.2875
Epoch 67/105
64/64 [==============================] - ETA: 0s - loss: 2.0447 - accuracy: 0.2910
Epoch 67: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 110ms/step - loss: 2.0447 - accuracy: 0.2910 - val_loss: 2.1081 - val_accuracy: 0.2625
Epoch 68/105
64/64 [==============================] - ETA: 0s - loss: 2.0260 - accuracy: 0.2949
Epoch 68: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 113ms/step - loss: 2.0260 - accuracy: 0.2949 - val_loss: 2.0824 - val_accuracy: 0.2812
Epoch 69/105
64/64 [==============================] - ETA: 0s - loss: 1.9872 - accuracy: 0.3105
Epoch 69: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 111ms/step - loss: 1.9872 - accuracy: 0.3105 - val_loss: 2.5820 - val_accuracy: 0.2625
Epoch 70/105
64/64 [==============================] - ETA: 0s - loss: 1.9876 - accuracy: 0.3125
Epoch 70: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 110ms/step - loss: 1.9876 - accuracy: 0.3125 - val_loss: 2.1607 - val_accuracy: 0.2875
Epoch 71/105
64/64 [==============================] - ETA: 0s - loss: 1.9423 - accuracy: 0.3086
Epoch 71: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 112ms/step - loss: 1.9423 - accuracy: 0.3086 - val_loss: 2.4892 - val_accuracy: 0.2688
Epoch 72/105
64/64 [==============================] - ETA: 0s - loss: 1.9992 - accuracy: 0.3018
Epoch 72: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 111ms/step - loss: 1.9992 - accuracy: 0.3018 - val_loss: 2.0616 - val_accuracy: 0.2812
Epoch 73/105
64/64 [==============================] - ETA: 0s - loss: 1.9844 - accuracy: 0.3008
Epoch 73: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 110ms/step - loss: 1.9844 - accuracy: 0.3008 - val_loss: 2.1092 - val_accuracy: 0.3063
Epoch 74/105
64/64 [==============================] - ETA: 0s - loss: 2.0088 - accuracy: 0.2979
Epoch 74: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 113ms/step - loss: 2.0088 - accuracy: 0.2979 - val_loss: 2.3123 - val_accuracy: 0.2875
Epoch 75/105
64/64 [==============================] - ETA: 0s - loss: 1.9405 - accuracy: 0.3203
Epoch 75: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 110ms/step - loss: 1.9405 - accuracy: 0.3203 - val_loss: 2.0611 - val_accuracy: 0.3000
Epoch 76/105
64/64 [==============================] - ETA: 0s - loss: 1.9830 - accuracy: 0.3027
Epoch 76: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 110ms/step - loss: 1.9830 - accuracy: 0.3027 - val_loss: 2.0765 - val_accuracy: 0.2688
Epoch 77/105
64/64 [==============================] - ETA: 0s - loss: 1.9775 - accuracy: 0.3174
Epoch 77: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 113ms/step - loss: 1.9775 - accuracy: 0.3174 - val_loss: 2.1275 - val_accuracy: 0.2688
Epoch 78/105
64/64 [==============================] - ETA: 0s - loss: 1.9616 - accuracy: 0.3184
Epoch 78: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 111ms/step - loss: 1.9616 - accuracy: 0.3184 - val_loss: 2.2481 - val_accuracy: 0.2375
Epoch 79/105
64/64 [==============================] - ETA: 0s - loss: 2.0061 - accuracy: 0.3105
Epoch 79: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 110ms/step - loss: 2.0061 - accuracy: 0.3105 - val_loss: 2.1111 - val_accuracy: 0.3187
Epoch 80/105
64/64 [==============================] - ETA: 0s - loss: 1.9687 - accuracy: 0.2842
Epoch 80: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 113ms/step - loss: 1.9687 - accuracy: 0.2842 - val_loss: 2.3901 - val_accuracy: 0.2125
Epoch 81/105
64/64 [==============================] - ETA: 0s - loss: 1.9581 - accuracy: 0.3086
Epoch 81: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 111ms/step - loss: 1.9581 - accuracy: 0.3086 - val_loss: 2.6543 - val_accuracy: 0.2562
Epoch 82/105
64/64 [==============================] - ETA: 0s - loss: 2.0318 - accuracy: 0.2881
Epoch 82: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 110ms/step - loss: 2.0318 - accuracy: 0.2881 - val_loss: 2.0451 - val_accuracy: 0.3125
Epoch 83/105
64/64 [==============================] - ETA: 0s - loss: 1.9367 - accuracy: 0.3340
Epoch 83: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 112ms/step - loss: 1.9367 - accuracy: 0.3340 - val_loss: 2.0536 - val_accuracy: 0.3125
Epoch 84/105
64/64 [==============================] - ETA: 0s - loss: 1.9776 - accuracy: 0.3096
Epoch 84: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 110ms/step - loss: 1.9776 - accuracy: 0.3096 - val_loss: 2.1760 - val_accuracy: 0.2750
Epoch 85/105
64/64 [==============================] - ETA: 0s - loss: 1.9528 - accuracy: 0.3232
Epoch 85: val_accuracy did not improve from 0.38125
64/64 [==============================] - 7s 110ms/step - loss: 1.9528 - accuracy: 0.3232 - val_loss: 2.2718 - val_accuracy: 0.2625
Restoring model weights from the end of the best epoch: 45.
Epoch 85: early stopping
********* Training time: 1027.734375 s.
*****************
* Model Summary *
*****************
Model: "resnet50_imagenet"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
resnet50 (Functional) (None, 2048) 23587712
flatten_7 (Flatten) (None, 2048) 0
dense_7 (Dense) (None, 12) 24588
=================================================================
Total params: 23,612,300
Trainable params: 4,494,348
Non-trainable params: 19,117,952
_________________________________________________________________
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 3600 validated image filenames belonging to 12 classes.
225/225 [==============================] - 7s 28ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.5238 0.2991 0.3808 331
anarrhichomenum 0.6615 0.4044 0.5019 319
brevantherum 0.2451 0.4038 0.3051 312
dulcamara 0.2345 0.2046 0.2186 259
herposolanum 0.2295 0.0513 0.0838 273
holophylla 0.2564 0.5411 0.3479 316
lasiocarpa 0.5431 0.6815 0.6045 314
melongena 0.4821 0.5364 0.5078 302
micracantha 0.2091 0.2921 0.2437 315
petota 0.3618 0.5571 0.4387 289
solanum 0.4000 0.0443 0.0797 271
torva 0.2427 0.0836 0.1244 299
accuracy 0.3494 3600
macro avg 0.3658 0.3416 0.3197 3600
weighted avg 0.3702 0.3494 0.3268 3600
********************
* Confusion Matrix *
********************
************************************** * Train/Val Accuracy and Loss graphs * **************************************
******************************* * Mean metrics across 4 folds * *******************************
| 0 | |
|---|---|
| accuracy | 0.328958 |
| acanthophora.precision | 0.489129 |
| acanthophora.recall | 0.412287 |
| acanthophora.f1-score | 0.435882 |
| acanthophora.support | 298.250000 |
| anarrhichomenum.precision | 0.485494 |
| anarrhichomenum.recall | 0.544784 |
| anarrhichomenum.f1-score | 0.492074 |
| anarrhichomenum.support | 318.750000 |
| brevantherum.precision | 0.181580 |
| brevantherum.recall | 0.350126 |
| brevantherum.f1-score | 0.238469 |
| brevantherum.support | 288.000000 |
| dulcamara.precision | 0.253097 |
| dulcamara.recall | 0.123761 |
| dulcamara.f1-score | 0.120356 |
| dulcamara.support | 295.500000 |
| herposolanum.precision | 0.318281 |
| herposolanum.recall | 0.108918 |
| herposolanum.f1-score | 0.099920 |
| herposolanum.support | 285.000000 |
| holophylla.precision | 0.327192 |
| holophylla.recall | 0.379414 |
| holophylla.f1-score | 0.336098 |
| holophylla.support | 304.750000 |
| lasiocarpa.precision | 0.643389 |
| lasiocarpa.recall | 0.500517 |
| lasiocarpa.f1-score | 0.533530 |
| lasiocarpa.support | 312.500000 |
| melongena.precision | 0.498678 |
| melongena.recall | 0.544865 |
| melongena.f1-score | 0.512118 |
| melongena.support | 298.000000 |
| micracantha.precision | 0.214898 |
| micracantha.recall | 0.182378 |
| micracantha.f1-score | 0.183145 |
| micracantha.support | 300.000000 |
| petota.precision | 0.267569 |
| petota.recall | 0.615522 |
| petota.f1-score | 0.360635 |
| petota.support | 304.500000 |
| solanum.precision | 0.272677 |
| solanum.recall | 0.085386 |
| solanum.f1-score | 0.120557 |
| solanum.support | 294.500000 |
| torva.precision | 0.122760 |
| torva.recall | 0.052153 |
| torva.f1-score | 0.072668 |
| torva.support | 300.250000 |
| macro avg.precision | 0.339562 |
| macro avg.recall | 0.325009 |
| macro avg.f1-score | 0.292121 |
| macro avg.support | 3600.000000 |
| weighted avg.precision | 0.341608 |
| weighted avg.recall | 0.328958 |
| weighted avg.f1-score | 0.295240 |
| weighted avg.support | 3600.000000 |
CPU times: total: 1h 15min 3s Wall time: 45min 29s
Given the previous results, we'll move on to do a random search to find the best hyperparameters for the model.
# Split the balanced dataset into training/validation subsets (10% validation).
# NOTE(review): split_balanced_dataset is defined elsewhere in this notebook;
# presumably it keeps the class balance when splitting — confirm against its definition.
bal_training_set, bal_val_set = split_balanced_dataset(balanced_training_data, 0.1, is_verbose=False)
# Report the resulting dataframe shapes (rows x columns) for a sanity check.
print(f">>>Train DF: {bal_training_set.shape}\n Val DF: {bal_val_set.shape}")
>>>Train DF: (16200, 6) Val DF: (1800, 6)
"""
fit(
x=None,
y=None,
batch_size=None,
epochs=1,
verbose='auto',
callbacks=None,
validation_split=0.0,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_batch_size=None,
validation_freq=1,
max_queue_size=10,
workers=1,
use_multiprocessing=False
)
https://keras.io/guides/keras_tuner/getting_started/
https://keras.io/api/keras_tuner/hyperparameters/#choice-method
https://keras.io/guides/keras_tuner/custom_tuner/
"""
import keras_tuner
class MyBestArchitecture(keras_tuner.HyperModel):
    """KerasTuner hypermodel for a VGG16 transfer-learning classifier.

    Tunable hyperparameters:
      build: pooling, learning_rate, trainable_layers, optimizer
      fit:   batch_size

    Data comes from the notebook-level ``bal_training_set`` /
    ``bal_val_set`` dataframes, not from the ``x``/``y`` arguments.
    """

    def build(self, hp):
        """Build and compile a VGG16-based model for one hyperparameter trial.

        Args:
            hp: keras_tuner.HyperParameters container for this trial.

        Returns:
            A compiled tf.keras.Model with a NUM_CLASSES softmax head.
        """
        # Hyperparameters to tune
        model_pooling = hp.Choice("pooling",
                                  ["avg", "max"],
                                  default="max")
        learning_rate = hp.Choice("learning_rate",
                                  [0.001, 0.01, 0.003, 0.03, 0.006, 0.06],
                                  default=0.01)
        trainable_layers_limit = hp.Choice("trainable_layers",
                                           [-1, -3, -4, -5],
                                           default=-1)
        optimizer_name = hp.Choice("optimizer",
                                   ["sgd", "adam"],
                                   default="sgd")
        # NOTE(review): per the Keras applications docs, `pooling` is only
        # honored when include_top=False; with include_top=True the "pooling"
        # hyperparameter has no effect on the architecture. Confirm whether
        # include_top=False was intended before relying on this knob.
        vgg16_tl_model = tf.keras.applications.vgg16.VGG16(
            include_top=True,
            weights='imagenet',
            pooling=model_pooling,
            classifier_activation='softmax',
        )
        # Replace the 1000-class ImageNet prediction layer with a
        # NUM_CLASSES softmax head (keeps fc1/fc2 from the original top).
        output = vgg16_tl_model.layers[-2].output
        predictions = tf.keras.layers.Dense(NUM_CLASSES,
                                            activation="softmax")(output)
        vgg16_tl_model = tf.keras.Model(inputs=vgg16_tl_model.input,
                                        outputs=predictions)
        # Freeze everything except the last |trainable_layers_limit| layers,
        # i.e. only the tail of the network is fine-tuned.
        for layer in vgg16_tl_model.layers[:trainable_layers_limit]:
            layer.trainable = False
        # Select the optimizer inside conditional scopes so keras-tuner knows
        # which hyperparameters are active for each optimizer choice.
        with hp.conditional_scope("optimizer", ["adam"]):
            if optimizer_name == "adam":
                optimizer = tf.optimizers.Adam(learning_rate=learning_rate)
        with hp.conditional_scope("optimizer", ["sgd"]):
            if optimizer_name == "sgd":
                optimizer = tf.optimizers.SGD(learning_rate=learning_rate)
        vgg16_tl_model.compile(optimizer=optimizer,
                               loss='categorical_crossentropy',
                               metrics=['accuracy'])
        return vgg16_tl_model

    def fit(self, hp, model, x, y, **kwargs):
        """Train `model` for one trial, tuning batch_size as well.

        Args:
            hp: keras_tuner.HyperParameters container for this trial.
            model: the compiled model returned by build().
            x, y: ignored — data is read from the notebook-level
                bal_training_set / bal_val_set dataframes.
            **kwargs: forwarded to model.fit (epochs, tuner callbacks, ...).

        Returns:
            The History object returned by model.fit.
        """
        # Additional hyperparameter tuned at fit time.
        batch_size = hp.Choice("batch_size", [16, 32, 64, 128], default=32)
        # Progress bar + early stopping that restores the best weights.
        vgg16_tl_callbacks = [
            tf.keras.callbacks.ProgbarLogger(
                count_mode='steps',
                stateful_metrics=None
            ),
            tf.keras.callbacks.EarlyStopping(
                monitor='val_accuracy',
                min_delta=0,
                patience=64,
                verbose=1,
                mode='auto',
                restore_best_weights=True
            )
        ]
        preproc_func = tf.keras.applications.vgg16.preprocess_input
        train_data_gen = get_train_generator(preproc_func, bal_training_set, batch_size)
        val_data_gen = get_val_generator(bal_val_set, preproc_func, batch_size)
        # Robustness fix: the tuner normally supplies a callbacks list, but do
        # not assume it is present — create one instead of raising KeyError.
        kwargs.setdefault("callbacks", []).extend(vgg16_tl_callbacks)
        fit_params = {
            "x": train_data_gen,
            "validation_data": val_data_gen,
            "steps_per_epoch": 64,
            "validation_steps": 10,
            "workers": 10,
        }
        # Clear stale graph state left over from previous trials before
        # training, to limit GPU memory growth across the search.
        tf.keras.backend.clear_session()
        with tf.device(TRAINING_DEVICE_NAME):
            history = model.fit(**fit_params, **kwargs)
        return history
%%time
random_tuner = keras_tuner.RandomSearch(
MyBestArchitecture(),
objective="val_accuracy",
max_trials=4,
seed=RANDOM_SEED,
overwrite=True,
directory=os.path.join(DATA_ROOT_LOCATION, "vgg16tl_hypertuner"),
project_name="vgg16tl",
)
random_tuner.search(None, None, epochs=104)
display(random_tuner.search_space_summary())
display(random_tuner.results_summary())
Trial 4 Complete [00h 17m 14s]
val_accuracy: 0.5249999761581421
Best val_accuracy So Far: 0.840624988079071
Total elapsed time: 01h 00m 32s
INFO:tensorflow:Oracle triggered exit
Search space summary
Default search space size: 5
pooling (Choice)
{'default': 'max', 'conditions': [], 'values': ['avg', 'max'], 'ordered': False}
learning_rate (Choice)
{'default': 0.01, 'conditions': [], 'values': [0.001, 0.01, 0.003, 0.03, 0.006, 0.06], 'ordered': True}
trainable_layers (Choice)
{'default': -1, 'conditions': [], 'values': [-1, -3, -4, -5], 'ordered': True}
optimizer (Choice)
{'default': 'sgd', 'conditions': [], 'values': ['sgd', 'adam'], 'ordered': False}
batch_size (Choice)
{'default': 32, 'conditions': [], 'values': [16, 32, 64, 128], 'ordered': True}
None
Results summary Results in P:\CODE\ITESM\tesis-dataset-downloader\solanum_output\vgg16tl_hypertuner\vgg16tl Showing 10 best trials <keras_tuner.engine.objective.Objective object at 0x000001E7A987D9F0> Trial summary Hyperparameters: pooling: max learning_rate: 0.03 trainable_layers: -1 optimizer: adam batch_size: 32 Score: 0.840624988079071 Trial summary Hyperparameters: pooling: max learning_rate: 0.003 trainable_layers: -3 optimizer: adam batch_size: 64 Score: 0.6343749761581421 Trial summary Hyperparameters: pooling: max learning_rate: 0.001 trainable_layers: -4 optimizer: sgd batch_size: 64 Score: 0.5249999761581421 Trial summary Hyperparameters: pooling: max learning_rate: 0.03 trainable_layers: -1 optimizer: sgd batch_size: 64 Score: 0.515625
None
CPU times: total: 4h 44min Wall time: 1h 35s
# Fetch the best model that was found by the random search
# (top-1 ranked by the tuner objective, val_accuracy).
models = random_tuner.get_best_models(num_models=1)
best_model = models[0]
best_model.summary()
# Show the winning hyperparameter values for the best trial.
print(str(random_tuner.get_best_hyperparameters()[0].values))
# Build a validation generator with VGG16 preprocessing (batch of 32).
val_gen = get_val_generator(bal_val_set,
tf.keras.applications.vgg16.preprocess_input,
batch_size=32)
# Predict over the whole validation generator (no explicit step limit).
val_model_predictions = best_model.predict(val_gen,
#steps=val_gen.n // (val_gen.batch_size + 1)
#steps=10
)
# Print the classification report / confusion matrix for these predictions.
# NOTE(review): print_dataset_prediction_report and get_class_names are
# notebook-level helpers defined elsewhere in this file.
pred_report = print_dataset_prediction_report(val_model_predictions,
val_gen.classes,
get_class_names(val_gen))
Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 224, 224, 3)] 0
block1_conv1 (Conv2D) (None, 224, 224, 64) 1792
block1_conv2 (Conv2D) (None, 224, 224, 64) 36928
block1_pool (MaxPooling2D) (None, 112, 112, 64) 0
block2_conv1 (Conv2D) (None, 112, 112, 128) 73856
block2_conv2 (Conv2D) (None, 112, 112, 128) 147584
block2_pool (MaxPooling2D) (None, 56, 56, 128) 0
block3_conv1 (Conv2D) (None, 56, 56, 256) 295168
block3_conv2 (Conv2D) (None, 56, 56, 256) 590080
block3_conv3 (Conv2D) (None, 56, 56, 256) 590080
block3_pool (MaxPooling2D) (None, 28, 28, 256) 0
block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160
block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808
block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808
block4_pool (MaxPooling2D) (None, 14, 14, 512) 0
block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808
block5_pool (MaxPooling2D) (None, 7, 7, 512) 0
flatten (Flatten) (None, 25088) 0
fc1 (Dense) (None, 4096) 102764544
fc2 (Dense) (None, 4096) 16781312
dense (Dense) (None, 12) 49164
=================================================================
Total params: 134,309,708
Trainable params: 49,164
Non-trainable params: 134,260,544
_________________________________________________________________
{'pooling': 'max', 'learning_rate': 0.03, 'trainable_layers': -1, 'optimizer': 'adam', 'batch_size': 32}
Found 1800 validated image filenames belonging to 12 classes.
57/57 [==============================] - 3s 53ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.6486 0.4800 0.5517 150
anarrhichomenum 0.4354 0.8533 0.5766 150
brevantherum 0.3684 0.7933 0.5032 150
dulcamara 1.0000 0.0133 0.0263 150
herposolanum 0.4957 0.3867 0.4345 150
holophylla 0.8571 0.0400 0.0764 150
lasiocarpa 0.3590 0.9933 0.5274 150
melongena 0.6768 0.4467 0.5382 150
micracantha 0.5778 0.1733 0.2667 150
petota 0.7895 0.1000 0.1775 150
solanum 0.3268 0.7733 0.4594 150
torva 0.3077 0.0267 0.0491 150
accuracy 0.4233 1800
macro avg 0.5702 0.4233 0.3489 1800
weighted avg 0.5702 0.4233 0.3489 1800
********************
* Confusion Matrix *
********************
# Oversample every under-represented section up to 1500 rows.
MIN_NUM_CLASSES = 1500
below_threshold = training_count_per_section["count"] < MIN_NUM_CLASSES
sections_to_oversample = training_count_per_section.loc[below_threshold, "section"].tolist()
oversampler = RandomOverSampler(
    sampling_strategy=dict.fromkeys(sections_to_oversample, MIN_NUM_CLASSES),
    random_state=RANDOM_SEED,
)
final_training_set = imbsample(oversampler, training_set)

# Shuffle so the duplicated rows are interleaved across classes.
final_training_set = (
    final_training_set
    .sample(frac=1, random_state=RANDOM_SEED)
    .reset_index(drop=True)
)
final_train, final_val = split_balanced_dataset(final_training_set, is_verbose=False)

print_marquee("Training")
display(count_rows_by_column(final_train, "section"))
print_marquee("Validation")
display(count_rows_by_column(final_val, "section"))
************ * Training * ************
| section | count | |
|---|---|---|
| 9 | petota | 1404 |
| 0 | acanthophora | 1350 |
| 1 | anarrhichomenum | 1350 |
| 2 | brevantherum | 1350 |
| 3 | dulcamara | 1350 |
| 4 | herposolanum | 1350 |
| 5 | holophylla | 1350 |
| 6 | lasiocarpa | 1350 |
| 7 | melongena | 1350 |
| 8 | micracantha | 1350 |
| 10 | solanum | 1350 |
| 11 | torva | 1350 |
************** * Validation * **************
| section | count | |
|---|---|---|
| 9 | petota | 156 |
| 0 | acanthophora | 150 |
| 1 | anarrhichomenum | 150 |
| 2 | brevantherum | 150 |
| 3 | dulcamara | 150 |
| 4 | herposolanum | 150 |
| 5 | holophylla | 150 |
| 6 | lasiocarpa | 150 |
| 7 | melongena | 150 |
| 8 | micracantha | 150 |
| 10 | solanum | 150 |
| 11 | torva | 150 |
"""
Best Params found:
Trial summary
Hyperparameters:
pooling: max
learning_rate: 0.03
trainable_layers: -1
optimizer: adam
batch_size: 32
Score: 0.840624988079071
"""
LEARNING_RATE = 0.03
BATCH_SIZE = 32
def train_using_best_params(train_df,
                            val_df,
                            test_df=None,
                            is_binary_class: bool = False,
                            print_loss_graphs: bool = True,
                            print_model_info: bool = True):
    """Train a VGG16 transfer-learning model with the best hyperparameters
    found by the random search (max pooling, lr=0.03, adam, batch size 32,
    only the final Dense layer trainable).

    Args:
        train_df: training DataFrame with a "section" label column.
        val_df: validation DataFrame, same schema as train_df.
        test_df: optional test DataFrame; when provided it replaces val_df
            for the final evaluation report.
        is_binary_class: switch the loss to binary_crossentropy for the
            one-vs-rest experiments.
        print_loss_graphs: forwarded to evaluate_model.
        print_model_info: forwarded to evaluate_model.

    Returns:
        A (model, training_history) tuple.
    """
    preproc_func = tf.keras.applications.vgg16.preprocess_input
    num_classes = len(train_df["section"].unique())
    loss = 'binary_crossentropy' if is_binary_class else 'categorical_crossentropy'
    train_data_gen = get_train_generator(preproc_func,
                                         train_df,
                                         batch_size=BATCH_SIZE)
    # Get the ImageNet weights and prevent the training of the network up
    # until the FC layers. We include the top even though we still fine-tune
    # the fully-connected layers, because we want their weights initialized
    # to ImageNet values instead of random ones: during experimentation,
    # randomly initialized FC layers prevented VGG16 from learning at all.
    vgg16_imagenet = tf.keras.applications.vgg16.VGG16(
        include_top=True,
        weights='imagenet',
        input_shape=(224, 224, 3),
    )
    # Freeze all ImageNet layers; fine tuning happens only in the new Dense
    # prediction layer below, pre-initialized layers stay fixed.
    for layer in vgg16_imagenet.layers:
        layer.trainable = False
    vgg16_tl_model = tf.keras.Sequential(
        [
            # Remove the 1000-class ImageNet prediction layer to accommodate
            # the new prediction layer. Softmax is used for the binary case
            # too: it then simply has two output units.
            *vgg16_imagenet.layers[:-1],
            tf.keras.layers.Dense(num_classes, activation="softmax"),
        ]
    )
    vgg16_tl_model.compile(optimizer=tf.optimizers.Adam(learning_rate=LEARNING_RATE),
                           loss=loss,
                           metrics=['accuracy'])
    # Progress reporting plus early stopping keyed on the training loss.
    vgg16_tl_callbacks = [
        # create_model_checkpoint(os.path.join(DATA_ROOT_LOCATION, f"vgg16_tl_final.h5")),
        tf.keras.callbacks.ProgbarLogger(
            count_mode='steps',
            stateful_metrics=None
        ),
        tf.keras.callbacks.EarlyStopping(
            monitor='loss',
            min_delta=0,
            patience=100,
            verbose=0,
            mode='auto',
            restore_best_weights=True
        )
    ]
    fit_params = {
        "x": train_data_gen,
        "epochs": 500,
        "callbacks": vgg16_tl_callbacks,
        "steps_per_epoch": 64,
        "batch_size": BATCH_SIZE,
        "workers": 10,
        "verbose": 0,
        "validation_data": get_val_generator(val_df,
                                             preprocessing_func=preproc_func,
                                             batch_size=BATCH_SIZE)
    }
    with tf.device(TRAINING_DEVICE_NAME):
        vgg16_tl_training_history = vgg16_tl_model.fit(**fit_params)
    # Pass the raw DataFrame so the ImageDataGenerator is rebuilt properly.
    # The test set (when given) is plugged into validation_data to piggy-back
    # on the code that evaluates how well the final model predicts.
    # BUG FIX: the previous `test_df if test_df else val_df` raised
    # "ValueError: The truth value of a DataFrame is ambiguous" whenever a
    # real test_df was passed; compare explicitly against None instead.
    fit_params["validation_data"] = val_df if test_df is None else test_df
    _, pred_report = evaluate_model(vgg16_tl_model,
                                    vgg16_tl_training_history,
                                    fit_params=fit_params,
                                    preproc_func=preproc_func,
                                    print_loss_graphs=print_loss_graphs,
                                    print_model_info=print_model_info)
    return vgg16_tl_model, vgg16_tl_training_history
%%time
# Retrain on the full oversampled training split with the tuned hyperparameters.
train_using_best_params(final_train, final_val)
Found 16254 validated image filenames belonging to 12 classes.
Found 1806 validated image filenames belonging to 12 classes.
*****************
* Model Summary *
*****************
Model: "sequential_18"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
block1_conv1 (Conv2D) (None, 224, 224, 64) 1792
block1_conv2 (Conv2D) (None, 224, 224, 64) 36928
block1_pool (MaxPooling2D) (None, 112, 112, 64) 0
block2_conv1 (Conv2D) (None, 112, 112, 128) 73856
block2_conv2 (Conv2D) (None, 112, 112, 128) 147584
block2_pool (MaxPooling2D) (None, 56, 56, 128) 0
block3_conv1 (Conv2D) (None, 56, 56, 256) 295168
block3_conv2 (Conv2D) (None, 56, 56, 256) 590080
block3_conv3 (Conv2D) (None, 56, 56, 256) 590080
block3_pool (MaxPooling2D) (None, 28, 28, 256) 0
block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160
block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808
block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808
block4_pool (MaxPooling2D) (None, 14, 14, 512) 0
block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808
block5_pool (MaxPooling2D) (None, 7, 7, 512) 0
flatten (Flatten) (None, 25088) 0
fc1 (Dense) (None, 4096) 102764544
fc2 (Dense) (None, 4096) 16781312
dense_42 (Dense) (None, 12) 49164
=================================================================
Total params: 134,309,708
Trainable params: 49,164
Non-trainable params: 134,260,544
_________________________________________________________________
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 1806 validated image filenames belonging to 12 classes.
57/57 [==============================] - 3s 53ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.7778 0.5600 0.6512 150
anarrhichomenum 0.7453 0.8000 0.7717 150
brevantherum 0.5241 0.6533 0.5816 150
dulcamara 0.4884 0.1400 0.2176 150
herposolanum 0.2528 0.7533 0.3786 150
holophylla 0.0000 0.0000 0.0000 150
lasiocarpa 0.9867 0.4933 0.6578 150
melongena 0.0000 0.0000 0.0000 150
micracantha 0.4966 0.4800 0.4881 150
petota 0.3043 0.8077 0.4421 156
solanum 0.3838 0.5067 0.4368 150
torva 0.5000 0.0933 0.1573 150
accuracy 0.4419 1806
macro avg 0.4550 0.4406 0.3986 1806
weighted avg 0.4545 0.4419 0.3987 1806
********************
* Confusion Matrix *
********************
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. 
Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))
*********************************** * Confusion Matrix as percentages * ***********************************
************************************** * Train/Val Accuracy and Loss graphs * **************************************
CPU times: total: 1h 29min 53s Wall time: 19min 16s
We had very bad results for many of the classes. To troubleshoot which classes are the most problematic, we'll perform a one-vs-many (one-vs-rest) binary classification for each class and see which classes need to be worked on.
%%time
def transform_to_binary_class(df, section_name: str):
    """Return a copy of *df* where every section other than *section_name*
    is relabeled "not-<section_name>", producing a one-vs-rest binary target.
    The input DataFrame is not mutated."""
    relabeled = df.copy()
    other_rows = relabeled["section"] != section_name
    relabeled.loc[other_rows, "section"] = f"not-{section_name}"
    return relabeled
for section_name in np.sort(final_train["section"].unique()):
print_marquee(f"{section_name} vs. many")
one_vs_many_train_df = transform_to_binary_class(final_train, section_name)
one_vs_many_val_df = transform_to_binary_class(final_val, section_name)
train_using_best_params(one_vs_many_train_df,
one_vs_many_val_df,
is_binary_class=True,
print_loss_graphs=False,
print_model_info=False)
*************************
* acanthophora vs. many *
*************************
Found 16254 validated image filenames belonging to 2 classes.
Found 1806 validated image filenames belonging to 2 classes.
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 1806 validated image filenames belonging to 2 classes.
57/57 [==============================] - 3s 51ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.5932 0.9333 0.7254 150
not-acanthophora 0.9936 0.9420 0.9671 1656
accuracy 0.9413 1806
macro avg 0.7934 0.9377 0.8463 1806
weighted avg 0.9604 0.9413 0.9471 1806
********************
* Confusion Matrix *
********************
*********************************** * Confusion Matrix as percentages * ***********************************
****************************
* anarrhichomenum vs. many *
****************************
Found 16254 validated image filenames belonging to 2 classes.
Found 1806 validated image filenames belonging to 2 classes.
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 1806 validated image filenames belonging to 2 classes.
57/57 [==============================] - 3s 54ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
anarrhichomenum 0.7029 0.8200 0.7569 150
not-anarrhichomenum 0.9834 0.9686 0.9760 1656
accuracy 0.9563 1806
macro avg 0.8432 0.8943 0.8664 1806
weighted avg 0.9601 0.9563 0.9578 1806
********************
* Confusion Matrix *
********************
*********************************** * Confusion Matrix as percentages * ***********************************
*************************
* brevantherum vs. many *
*************************
Found 16254 validated image filenames belonging to 2 classes.
Found 1806 validated image filenames belonging to 2 classes.
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 1806 validated image filenames belonging to 2 classes.
57/57 [==============================] - 3s 55ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
brevantherum 1.0000 0.0067 0.0132 150
not-brevantherum 0.9175 1.0000 0.9569 1656
accuracy 0.9175 1806
macro avg 0.9587 0.5033 0.4851 1806
weighted avg 0.9243 0.9175 0.8786 1806
********************
* Confusion Matrix *
********************
*********************************** * Confusion Matrix as percentages * ***********************************
**********************
* dulcamara vs. many *
**********************
Found 16254 validated image filenames belonging to 2 classes.
Found 1806 validated image filenames belonging to 2 classes.
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 1806 validated image filenames belonging to 2 classes.
57/57 [==============================] - 3s 54ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
dulcamara 0.0000 0.0000 0.0000 150
not-dulcamara 0.9169 1.0000 0.9567 1656
accuracy 0.9169 1806
macro avg 0.4585 0.5000 0.4783 1806
weighted avg 0.8408 0.9169 0.8772 1806
********************
* Confusion Matrix *
********************
C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) C:\Users\Charlie\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\metrics\_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. 
Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))
*********************************** * Confusion Matrix as percentages * ***********************************
*************************
* herposolanum vs. many *
*************************
Found 16254 validated image filenames belonging to 2 classes.
Found 1806 validated image filenames belonging to 2 classes.
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 1806 validated image filenames belonging to 2 classes.
57/57 [==============================] - 3s 54ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
herposolanum 0.4943 0.5733 0.5309 150
not-herposolanum 0.9608 0.9469 0.9538 1656
accuracy 0.9158 1806
macro avg 0.7275 0.7601 0.7423 1806
weighted avg 0.9220 0.9158 0.9186 1806
********************
* Confusion Matrix *
********************
*********************************** * Confusion Matrix as percentages * ***********************************
***********************
* holophylla vs. many *
***********************
Found 16254 validated image filenames belonging to 2 classes.
Found 1806 validated image filenames belonging to 2 classes.
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 1806 validated image filenames belonging to 2 classes.
57/57 [==============================] - 3s 53ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
holophylla 0.4591 0.4867 0.4725 150
not-holophylla 0.9532 0.9481 0.9507 1656
accuracy 0.9097 1806
macro avg 0.7062 0.7174 0.7116 1806
weighted avg 0.9122 0.9097 0.9109 1806
********************
* Confusion Matrix *
********************
*********************************** * Confusion Matrix as percentages * ***********************************
***********************
* lasiocarpa vs. many *
***********************
Found 16254 validated image filenames belonging to 2 classes.
Found 1806 validated image filenames belonging to 2 classes.
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 1806 validated image filenames belonging to 2 classes.
57/57 [==============================] - 3s 53ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
lasiocarpa 0.8679 0.6133 0.7187 150
not-lasiocarpa 0.9659 0.9915 0.9785 1656
accuracy 0.9601 1806
macro avg 0.9169 0.8024 0.8486 1806
weighted avg 0.9577 0.9601 0.9570 1806
********************
* Confusion Matrix *
********************
*********************************** * Confusion Matrix as percentages * ***********************************
**********************
* melongena vs. many *
**********************
Found 16254 validated image filenames belonging to 2 classes.
Found 1806 validated image filenames belonging to 2 classes.
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 1806 validated image filenames belonging to 2 classes.
57/57 [==============================] - 3s 55ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
melongena 0.9070 0.2600 0.4041 150
not-melongena 0.9370 0.9976 0.9664 1656
accuracy 0.9363 1806
macro avg 0.9220 0.6288 0.6853 1806
weighted avg 0.9345 0.9363 0.9197 1806
********************
* Confusion Matrix *
********************
*********************************** * Confusion Matrix as percentages * ***********************************
************************
* micracantha vs. many *
************************
Found 16254 validated image filenames belonging to 2 classes.
Found 1806 validated image filenames belonging to 2 classes.
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 1806 validated image filenames belonging to 2 classes.
57/57 [==============================] - 3s 54ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
micracantha 0.8571 0.1600 0.2697 150
not-micracantha 0.9291 0.9976 0.9621 1656
accuracy 0.9280 1806
macro avg 0.8931 0.5788 0.6159 1806
weighted avg 0.9232 0.9280 0.9046 1806
********************
* Confusion Matrix *
********************
*********************************** * Confusion Matrix as percentages * ***********************************
*******************
* petota vs. many *
*******************
Found 16254 validated image filenames belonging to 2 classes.
Found 1806 validated image filenames belonging to 2 classes.
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 1806 validated image filenames belonging to 2 classes.
57/57 [==============================] - 3s 54ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
not-petota 0.9412 0.9794 0.9599 1650
petota 0.6180 0.3526 0.4490 156
accuracy 0.9252 1806
macro avg 0.7796 0.6660 0.7044 1806
weighted avg 0.9133 0.9252 0.9158 1806
********************
* Confusion Matrix *
********************
*********************************** * Confusion Matrix as percentages * ***********************************
********************
* solanum vs. many *
********************
Found 16254 validated image filenames belonging to 2 classes.
Found 1806 validated image filenames belonging to 2 classes.
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 1806 validated image filenames belonging to 2 classes.
57/57 [==============================] - 3s 52ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
not-solanum 0.9220 0.9988 0.9588 1656
solanum 0.8333 0.0667 0.1235 150
accuracy 0.9214 1806
macro avg 0.8776 0.5327 0.5411 1806
weighted avg 0.9146 0.9214 0.8895 1806
********************
* Confusion Matrix *
********************
*********************************** * Confusion Matrix as percentages * ***********************************
******************
* torva vs. many *
******************
Found 16254 validated image filenames belonging to 2 classes.
Found 1806 validated image filenames belonging to 2 classes.
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 1806 validated image filenames belonging to 2 classes.
57/57 [==============================] - 3s 54ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
not-torva 0.9511 0.9269 0.9388 1656
torva 0.3698 0.4733 0.4152 150
accuracy 0.8893 1806
macro avg 0.6604 0.7001 0.6770 1806
weighted avg 0.9028 0.8893 0.8953 1806
********************
* Confusion Matrix *
********************
*********************************** * Confusion Matrix as percentages * ***********************************
CPU times: total: 16h 9min 14s Wall time: 4h 1min 42s
%%time
def transform_to_binary_class(df, section_name: str):
    """Collapse every section except *section_name* into a single
    "not-<section_name>" label (one-vs-rest encoding) on a copy of *df*."""
    out = df.copy()
    out["section"] = out["section"].where(out["section"] == section_name,
                                          f"not-{section_name}")
    return out
for section_name in np.sort(final_train["section"].unique()):
print_marquee(f"{section_name} vs. many")
one_vs_many_train_df = transform_to_binary_class(final_train, section_name)
one_vs_many_val_df = transform_to_binary_class(final_val, section_name)
train_using_best_params(one_vs_many_train_df,
one_vs_many_val_df,
is_binary_class=True,
print_loss_graphs=False,
print_model_info=False)
Output hidden; open in https://colab.research.google.com to view.
As we can see, there are a couple of sections that the neural network does not recognize at all and that might be introducing too much noise. Let's now train a model without the problematic sections and see how it behaves; later we can evaluate why those sections are so problematic and propose ways to attack that problem.
For this part, we'll remove the classes that were not identified at all, as well as those with an F1-score below 40%.
# Sections whose one-vs-rest F1-score was zero or below 40%; drop them.
problematic_sections = [
    "dulcamara",
    "herposolanum",
    "holophylla",
    "melongena",
    "solanum",
    "torva",
]
keep_train = ~final_train["section"].isin(problematic_sections)
no_problematic_train_df = final_train[keep_train]
keep_val = ~final_val["section"].isin(problematic_sections)
no_problematic_val_df = final_val[keep_val]

print_marquee("Non-problematic training count")
display(count_rows_by_column(no_problematic_train_df, "section"))
print_marquee("Non-problematic validation count")
display(count_rows_by_column(no_problematic_val_df, "section"))
********************************** * Non-problematic training count * **********************************
| section | count | |
|---|---|---|
| 8 | petota | 1404 |
| 0 | acanthophora | 1350 |
| 1 | anarrhichomenum | 1350 |
| 2 | brevantherum | 1350 |
| 3 | dulcamara | 1350 |
| 4 | herposolanum | 1350 |
| 5 | lasiocarpa | 1350 |
| 6 | melongena | 1350 |
| 7 | micracantha | 1350 |
| 9 | solanum | 1350 |
************************************ * Non-problematic validation count * ************************************
| section | count | |
|---|---|---|
| 8 | petota | 156 |
| 0 | acanthophora | 150 |
| 1 | anarrhichomenum | 150 |
| 2 | brevantherum | 150 |
| 3 | dulcamara | 150 |
| 4 | herposolanum | 150 |
| 5 | lasiocarpa | 150 |
| 6 | melongena | 150 |
| 7 | micracantha | 150 |
| 9 | solanum | 150 |
%%time
# Second experiment: drop a different subset of weak sections
# (herposolanum, holophylla, melongena and torva are kept this time).
problematic_sections = [
    "brevantherum",
    "dulcamara",
    "solanum",
    "micracantha",
]
keep_mask_train = ~final_train["section"].isin(problematic_sections)
no_problematic_train_df = final_train[keep_mask_train]
keep_mask_val = ~final_val["section"].isin(problematic_sections)
no_problematic_val_df = final_val[keep_mask_val]

print_marquee("Non-problematic training count")
display(count_rows_by_column(no_problematic_train_df, "section"))
print_marquee("Non-problematic validation count")
display(count_rows_by_column(no_problematic_val_df, "section"))

train_using_best_params(no_problematic_train_df,
                        no_problematic_val_df,
                        print_loss_graphs=True,
                        print_model_info=False)
********************************** * Non-problematic training count * **********************************
| section | count | |
|---|---|---|
| 6 | petota | 1404 |
| 0 | acanthophora | 1350 |
| 1 | anarrhichomenum | 1350 |
| 2 | herposolanum | 1350 |
| 3 | holophylla | 1350 |
| 4 | lasiocarpa | 1350 |
| 5 | melongena | 1350 |
| 7 | torva | 1350 |
************************************ * Non-problematic validation count * ************************************
| section | count | |
|---|---|---|
| 6 | petota | 156 |
| 0 | acanthophora | 150 |
| 1 | anarrhichomenum | 150 |
| 2 | herposolanum | 150 |
| 3 | holophylla | 150 |
| 4 | lasiocarpa | 150 |
| 5 | melongena | 150 |
| 7 | torva | 150 |
Found 10854 validated image filenames belonging to 8 classes.
Found 1206 validated image filenames belonging to 8 classes.
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 1206 validated image filenames belonging to 8 classes.
38/38 [==============================] - 2s 51ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.5403 0.8933 0.6734 150
anarrhichomenum 1.0000 0.0667 0.1250 150
herposolanum 0.5000 0.6333 0.5588 150
holophylla 0.6341 0.6933 0.6624 150
lasiocarpa 0.8943 0.7333 0.8059 150
melongena 0.9492 0.3733 0.5359 150
petota 0.3434 0.8718 0.4928 156
torva 0.7500 0.0800 0.1446 150
accuracy 0.5448 1206
macro avg 0.7014 0.5431 0.4998 1206
weighted avg 0.6996 0.5448 0.4998 1206
********************
* Confusion Matrix *
********************
*********************************** * Confusion Matrix as percentages * ***********************************
************************************** * Train/Val Accuracy and Loss graphs * **************************************
CPU times: total: 54min 19s Wall time: 11min 44s
%%time
# Third experiment: remove all six sections with an F1-score below 40%
# and keep the resulting model/history for the final test-set evaluation.
problematic_sections = [
    "dulcamara",
    "herposolanum",
    "holophylla",
    "melongena",
    "solanum",
    "torva",
]
no_problematic_train_df = final_train[~final_train["section"].isin(problematic_sections)]
no_problematic_val_df = final_val[~final_val["section"].isin(problematic_sections)]

print_marquee("Non-problematic training count")
display(count_rows_by_column(no_problematic_train_df, "section"))
print_marquee("Non-problematic validation count")
display(count_rows_by_column(no_problematic_val_df, "section"))

vgg16_m, vgg16_h = train_using_best_params(no_problematic_train_df,
                                           no_problematic_val_df,
                                           print_loss_graphs=True,
                                           print_model_info=False)
********************************** * Non-problematic training count * **********************************
| section | count | |
|---|---|---|
| 5 | petota | 1404 |
| 0 | acanthophora | 1350 |
| 1 | anarrhichomenum | 1350 |
| 2 | brevantherum | 1350 |
| 3 | lasiocarpa | 1350 |
| 4 | micracantha | 1350 |
************************************ * Non-problematic validation count * ************************************
| section | count | |
|---|---|---|
| 5 | petota | 156 |
| 0 | acanthophora | 150 |
| 1 | anarrhichomenum | 150 |
| 2 | brevantherum | 150 |
| 3 | lasiocarpa | 150 |
| 4 | micracantha | 150 |
Found 8154 validated image filenames belonging to 6 classes.
Found 906 validated image filenames belonging to 6 classes.
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 906 validated image filenames belonging to 6 classes.
29/29 [==============================] - 2s 53ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.7333 0.9533 0.8290 150
anarrhichomenum 0.8630 0.8400 0.8514 150
brevantherum 0.8506 0.8733 0.8618 150
lasiocarpa 1.0000 0.6067 0.7552 150
micracantha 0.8661 0.7333 0.7942 150
petota 0.6943 0.8590 0.7679 156
accuracy 0.8113 906
macro avg 0.8346 0.8109 0.8099 906
weighted avg 0.8336 0.8113 0.8096 906
********************
* Confusion Matrix *
********************
*********************************** * Confusion Matrix as percentages * ***********************************
************************************** * Train/Val Accuracy and Loss graphs * **************************************
CPU times: total: 1h 58min 25s Wall time: 23min 16s
# vgg16_m.save(os.path.join(DATA_ROOT_LOCATION, f"vgg16_tl_best_no_problematic_sections_80acc.h5"))
# Build the test split with the same section filter used for train/val.
no_problematic_test_df = test_set[~test_set["section"].isin(problematic_sections)]

# Evaluation settings; the held-out test frame is supplied via validation_data.
fit_params = dict(
    epochs=500,
    steps_per_epoch=64,
    batch_size=BATCH_SIZE,
    workers=10,
    verbose=0,
    validation_data=no_problematic_test_df,
)

with tf.device(TRAINING_DEVICE_NAME):
    # First return value (the model) is not needed here; keep only the report.
    _, pred_report = evaluate_model(
        vgg16_m,
        vgg16_h,
        fit_params=fit_params,
        preproc_func=tf.keras.applications.vgg16.preprocess_input,
        print_loss_graphs=True,
        print_model_info=True,
    )
*****************
* Model Summary *
*****************
Model: "sequential_6"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
block1_conv1 (Conv2D) (None, 224, 224, 64) 1792
block1_conv2 (Conv2D) (None, 224, 224, 64) 36928
block1_pool (MaxPooling2D) (None, 112, 112, 64) 0
block2_conv1 (Conv2D) (None, 112, 112, 128) 73856
block2_conv2 (Conv2D) (None, 112, 112, 128) 147584
block2_pool (MaxPooling2D) (None, 56, 56, 128) 0
block3_conv1 (Conv2D) (None, 56, 56, 256) 295168
block3_conv2 (Conv2D) (None, 56, 56, 256) 590080
block3_conv3 (Conv2D) (None, 56, 56, 256) 590080
block3_pool (MaxPooling2D) (None, 28, 28, 256) 0
block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160
block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808
block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808
block4_pool (MaxPooling2D) (None, 14, 14, 512) 0
block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808
block5_pool (MaxPooling2D) (None, 7, 7, 512) 0
flatten (Flatten) (None, 25088) 0
fc1 (Dense) (None, 4096) 102764544
fc2 (Dense) (None, 4096) 16781312
dense_6 (Dense) (None, 6) 24582
=================================================================
Total params: 134,285,126
Trainable params: 24,582
Non-trainable params: 134,260,544
_________________________________________________________________
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 310 validated image filenames belonging to 6 classes.
10/10 [==============================] - 4s 434ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.3333 0.7000 0.4516 10
anarrhichomenum 0.0870 0.2000 0.1212 10
brevantherum 0.8939 0.7284 0.8027 81
lasiocarpa 1.0000 0.5556 0.7143 18
micracantha 0.2174 0.2778 0.2439 18
petota 0.8743 0.8439 0.8588 173
accuracy 0.7387 310
macro avg 0.5676 0.5509 0.5321 310
weighted avg 0.8057 0.7387 0.7631 310
********************
* Confusion Matrix *
********************
*********************************** * Confusion Matrix as percentages * ***********************************
************************************** * Train/Val Accuracy and Loss graphs * **************************************
%%time
problematic_sections = [
"dulcamara",
"herposolanum",
"holophylla",
"melongena",
"solanum",
"torva"
]
print_marquee("Non-problematic training count")
no_problematic_train_df = final_train[~final_train.section.isin(problematic_sections)]
display(count_rows_by_column(no_problematic_train_df, "section"))
print_marquee("Non-problematic validation count")
no_problematic_val_df = final_val[~final_val.section.isin(problematic_sections)]
display(count_rows_by_column(no_problematic_val_df, "section"))
train_using_best_params(no_problematic_train_df,
no_problematic_val_df,
print_loss_graphs=True,
print_model_info=False)
********************************** * Non-problematic training count * **********************************
| section | count | |
|---|---|---|
| 5 | petota | 1404 |
| 0 | acanthophora | 1350 |
| 1 | anarrhichomenum | 1350 |
| 2 | brevantherum | 1350 |
| 3 | lasiocarpa | 1350 |
| 4 | micracantha | 1350 |
************************************ * Non-problematic validation count * ************************************
| section | count | |
|---|---|---|
| 5 | petota | 156 |
| 0 | acanthophora | 150 |
| 1 | anarrhichomenum | 150 |
| 2 | brevantherum | 150 |
| 3 | lasiocarpa | 150 |
| 4 | micracantha | 150 |
Found 8154 validated image filenames belonging to 6 classes.
Found 906 validated image filenames belonging to 6 classes.
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 906 validated image filenames belonging to 6 classes.
29/29 [==============================] - 2s 50ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.9071 0.8467 0.8759 150
anarrhichomenum 0.5952 1.0000 0.7463 150
brevantherum 0.9085 0.8600 0.8836 150
lasiocarpa 0.9489 0.8667 0.9059 150
micracantha 0.7391 0.9067 0.8144 150
petota 0.9412 0.3077 0.4638 156
accuracy 0.7947 906
macro avg 0.8400 0.7979 0.7816 906
weighted avg 0.8407 0.7947 0.7795 906
********************
* Confusion Matrix *
********************
*********************************** * Confusion Matrix as percentages * ***********************************
************************************** * Train/Val Accuracy and Loss graphs * **************************************
CPU times: total: 2h 5min 47s Wall time: 25min 1s
%%time
problematic_sections = [
"dulcamara",
"herposolanum",
"holophylla",
"melongena",
"solanum",
"torva"
]
print_marquee("Non-problematic training count")
no_problematic_train_df = final_train[~final_train.section.isin(problematic_sections)]
display(count_rows_by_column(no_problematic_train_df, "section"))
print_marquee("Non-problematic validation count")
no_problematic_val_df = final_val[~final_val.section.isin(problematic_sections)]
display(count_rows_by_column(no_problematic_val_df, "section"))
train_using_best_params(no_problematic_train_df,
no_problematic_val_df,
print_loss_graphs=True,
print_model_info=False)
Found 8154 validated image filenames belonging to 6 classes.
Found 906 validated image filenames belonging to 6 classes.
*****************
* Model Summary *
*****************
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
block1_conv1 (Conv2D) (None, 224, 224, 64) 1792
block1_conv2 (Conv2D) (None, 224, 224, 64) 36928
block1_pool (MaxPooling2D) (None, 112, 112, 64) 0
block2_conv1 (Conv2D) (None, 112, 112, 128) 73856
block2_conv2 (Conv2D) (None, 112, 112, 128) 147584
block2_pool (MaxPooling2D) (None, 56, 56, 128) 0
block3_conv1 (Conv2D) (None, 56, 56, 256) 295168
block3_conv2 (Conv2D) (None, 56, 56, 256) 590080
block3_conv3 (Conv2D) (None, 56, 56, 256) 590080
block3_pool (MaxPooling2D) (None, 28, 28, 256) 0
block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160
block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808
block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808
block4_pool (MaxPooling2D) (None, 14, 14, 512) 0
block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808
block5_pool (MaxPooling2D) (None, 7, 7, 512) 0
flatten (Flatten) (None, 25088) 0
fc1 (Dense) (None, 4096) 102764544
fc2 (Dense) (None, 4096) 16781312
dense (Dense) (None, 6) 24582
=================================================================
Total params: 134,285,126
Trainable params: 24,582
Non-trainable params: 134,260,544
_________________________________________________________________
***************************************
* Validation Dataset Confusion Matrix *
***************************************
Found 906 validated image filenames belonging to 6 classes.
29/29 [==============================] - 2s 49ms/step
*************************
* Classification Report *
*************************
precision recall f1-score support
acanthophora 0.8289 0.8400 0.8344 150
anarrhichomenum 0.8125 0.9533 0.8773 150
brevantherum 0.9706 0.4400 0.6055 150
lasiocarpa 0.9583 0.7667 0.8519 150
micracantha 0.6313 0.8333 0.7184 150
petota 0.6302 0.7756 0.6954 156
accuracy 0.7682 906
macro avg 0.8053 0.7682 0.7638 906
weighted avg 0.8042 0.7682 0.7634 906
********************
* Confusion Matrix *
********************
*********************************** * Confusion Matrix as percentages * ***********************************
************************************** * Train/Val Accuracy and Loss graphs * **************************************
CPU times: total: 1h 2min 15s Wall time: 12min 26s